batman-adv: Add missing hardif_free_ref in forw_packet_free

add_bcast_packet_to_list increases the refcount of if_incoming, but the
reference is never released again. The reference count must be
increased for all kinds of forwarded packets that store the primary
interface, and forw_packet_free must decrease it when the packet is
freed.

Also, purge_outstanding_packets has to invoke forw_packet_free itself
whenever a work item was actually cancelled: in that case the work
function never ran, so it never got the chance to free the packet.
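
For context, the acquire side of this pairing sits in
add_bcast_packet_to_list, which takes the primary interface reference
before queueing the packet. The standalone C model below illustrates
the intended pairing; the names loosely mirror batman-adv, but the
refcount is a plain int instead of the kernel's atomic_t/RCU machinery,
so treat it as a sketch, not kernel code:

/*
 * Standalone model of the reference pairing this patch completes.
 */
#include <stdio.h>
#include <stdlib.h>

struct hard_iface {
	int refcount;
};

struct forw_packet {
	struct hard_iface *if_incoming;
};

static void hardif_free_ref(struct hard_iface *hard_iface)
{
	if (--hard_iface->refcount == 0) {
		printf("hard_iface freed\n");
		free(hard_iface);
	}
}

/* Acquire: the reference taken here was leaked before this patch. */
static struct forw_packet *add_bcast_packet_to_list(struct hard_iface *primary_if)
{
	struct forw_packet *forw_packet = malloc(sizeof(*forw_packet));

	primary_if->refcount++;
	forw_packet->if_incoming = primary_if;
	return forw_packet;
}

/* Release: the hardif_free_ref call is what this patch adds. */
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	free(forw_packet);
}

int main(void)
{
	struct hard_iface *iface = malloc(sizeof(*iface));

	iface->refcount = 1;
	forw_packet_free(add_bcast_packet_to_list(iface));
	hardif_free_ref(iface);	/* drop the initial reference */
	return 0;
}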

This regression was introduced in
32ae9b221e788413ce68feaae2ca39e406211a0a.

Reported-by: Antonio Quartulli <ordex@autistici.org>
Signed-off-by: Sven Eckelmann <sven@narfation.org>
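
Both purge hunks below apply the same pattern.
cancel_delayed_work_sync() returns true only if the work item was still
pending, i.e. it was cancelled before its handler ever ran; the handler
is what normally unlinks and frees the forw_packet, so in the pending
case the purge path must do both itself, under the list lock the
handler would have taken. The shared pattern, extracted from the diff
for readability (list_lock stands in for the respective
forw_bcast_list_lock or forw_bat_list_lock):

	pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
	spin_lock_bh(&list_lock);

	if (pending) {
		/* handler never ran: unlink and free the packet here */
		hlist_del(&forw_packet->list);
		forw_packet_free(forw_packet);
	}
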
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f30d0c6..76daa46 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -377,6 +377,8 @@
{
if (forw_packet->skb)
kfree_skb(forw_packet->skb);
+ if (forw_packet->if_incoming)
+ hardif_free_ref(forw_packet->if_incoming);
kfree(forw_packet);
}
@@ -539,6 +541,7 @@
{
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
+ bool pending;
if (hard_iface)
bat_dbg(DBG_BATMAN, bat_priv,
@@ -567,8 +570,13 @@
* send_outstanding_bcast_packet() will lock the list to
* delete the item from the list
*/
- cancel_delayed_work_sync(&forw_packet->delayed_work);
+ pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+
+ if (pending) {
+ hlist_del(&forw_packet->list);
+ forw_packet_free(forw_packet);
+ }
}
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -591,8 +599,13 @@
* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
- cancel_delayed_work_sync(&forw_packet->delayed_work);
+ pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
spin_lock_bh(&bat_priv->forw_bat_list_lock);
+
+ if (pending) {
+ hlist_del(&forw_packet->list);
+ forw_packet_free(forw_packet);
+ }
}
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
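
For reference, hardif_free_ref() in the tree this patch targets is a
thin release helper roughly along these lines (reproduced from memory
of hard-interface.h; treat it as a sketch, not an authoritative copy):

static inline void hardif_free_ref(struct hard_iface *hard_iface)
{
	/* free the interface via RCU once the last reference is gone */
	if (atomic_dec_and_test(&hard_iface->refcount))
		call_rcu(&hard_iface->rcu, hardif_free_rcu);
}

Because the helper dereferences its argument unconditionally, the new
if (forw_packet->if_incoming) guard in forw_packet_free() keeps
forwarded packets that never had an interface stored from
dereferencing a NULL pointer.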