mac80211: protect rx-path with spinlock
This patch fixes the problem discussed in
"mac80211: Fix PN corruption in case of multiple
virtual interface" [1].

Amit Shakya reported a serious issue with my patch
"mac80211: serialize rx path workers" [2]:
"In case ieee80211_rx_handlers processing is going on
for skbs received on one vif and, at the same time, the
rx aggregation reorder timer expires on another vif,
sta_rx_agg_reorder_timer_expired is invoked and pushes
skbs into the single queue (local->rx_skb_queue).

ieee80211_rx_handlers in its while loop assumes that
the skbs are for the same sdata and sta. This assumption
doesn't hold in this scenario, and the PN gets corrupted
by the PN received in the other vif's skb, causing
traffic to stop due to a PN mismatch."

Fix this by dropping the shared rx_skb_queue (and its
running_rx_handler flag) altogether: released frames are
now collected on a caller-local sk_buff_head, and the rx
handlers are serialized by a new, dedicated rx_path_lock.
Since the frame queue lives on the caller's stack, frames
belonging to different stations or vifs can no longer be
interleaved in a single handler run.
[1] Message-Id: http://mid.gmane.org/201302041844.44436.chunkeey@googlemail.com
[2] Commit-Id: 24a8fdad35835e8d71f7
Reported-by: Amit Shakya <amit.shakya@stericsson.com>
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
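---

The hunks below all implement one pattern, so here is a minimal
userspace C sketch of it (illustrative only, not kernel code:
frame, frame_list and the list_* helpers are hypothetical stand-ins
for sk_buff, sk_buff_head and the __skb_queue_* primitives). Each
caller fills a private, on-stack list while holding only the per-TID
reorder lock, then drains that list under the single rx-path lock:

/*
 * Minimal userspace sketch (NOT kernel code) of the pattern this
 * patch adopts.  frame/frame_list stand in for sk_buff/sk_buff_head,
 * and the two mutexes stand in for tid_agg_rx->reorder_lock and the
 * new local->rx_path_lock.  Each caller fills a private, on-stack
 * list under the reorder lock, then drains that list under the one
 * rx-path lock -- so batches from different vifs/stations can never
 * interleave, which is what corrupted the PN with the old shared
 * local->rx_skb_queue.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct frame {
	int seq;
	struct frame *next;
};

struct frame_list {			/* models struct sk_buff_head */
	struct frame *head, *tail;
};

static pthread_mutex_t reorder_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rx_path_lock = PTHREAD_MUTEX_INITIALIZER;

static void list_init(struct frame_list *l)
{
	l->head = l->tail = NULL;	/* like __skb_queue_head_init() */
}

static void list_add_tail(struct frame_list *l, struct frame *f)
{
	f->next = NULL;			/* like __skb_queue_tail(): no lock, */
	if (l->tail)			/* the list is private to the caller */
		l->tail->next = f;
	else
		l->head = f;
	l->tail = f;
}

static struct frame *list_dequeue(struct frame_list *l)
{
	struct frame *f = l->head;	/* like __skb_dequeue() */

	if (f && !(l->head = f->next))
		l->tail = NULL;
	return f;
}

/* models ieee80211_sta_reorder_release(): fills the caller's list */
static void reorder_release(struct frame_list *frames)
{
	pthread_mutex_lock(&reorder_lock);
	for (int i = 0; i < 4; i++) {
		struct frame *f = malloc(sizeof(*f));

		if (!f)
			break;
		f->seq = i;
		list_add_tail(frames, f);
	}
	pthread_mutex_unlock(&reorder_lock);
}

/* models ieee80211_rx_handlers(): one batch at a time, fully serialized */
static void rx_handlers(struct frame_list *frames)
{
	struct frame *f;

	pthread_mutex_lock(&rx_path_lock);
	while ((f = list_dequeue(frames))) {
		printf("processing seq %d\n", f->seq);
		free(f);
	}
	pthread_mutex_unlock(&rx_path_lock);
}

int main(void)
{
	struct frame_list frames;	/* on the stack, as in the patch */

	list_init(&frames);
	reorder_release(&frames);
	rx_handlers(&frames);
	return 0;
}

Because the list lives on the caller's stack, no other context can
queue into it; that is also why the unlocked __skb_queue_tail() and
__skb_dequeue() variants are sufficient in the real code. The sketch
builds with "cc -pthread sketch.c".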
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 5fe9db7..080cf09 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -958,14 +958,7 @@
struct sk_buff_head skb_queue;
struct sk_buff_head skb_queue_unreliable;
- /*
- * Internal FIFO queue which is shared between multiple rx path
- * stages. Its main task is to provide a serialization mechanism,
- * so all rx handlers can enjoy having exclusive access to their
- * private data structures.
- */
- struct sk_buff_head rx_skb_queue;
- bool running_rx_handler; /* protected by rx_skb_queue.lock */
+ spinlock_t rx_path_lock;
/* Station data */
/*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2bdd454..2220331 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -34,8 +34,6 @@
#include "cfg.h"
#include "debugfs.h"
-static struct lock_class_key ieee80211_rx_skb_queue_class;
-
void ieee80211_configure_filter(struct ieee80211_local *local)
{
u64 mc;
@@ -613,21 +611,12 @@
mutex_init(&local->key_mtx);
spin_lock_init(&local->filter_lock);
+ spin_lock_init(&local->rx_path_lock);
spin_lock_init(&local->queue_stop_reason_lock);
INIT_LIST_HEAD(&local->chanctx_list);
mutex_init(&local->chanctx_mtx);
- /*
- * The rx_skb_queue is only accessed from tasklets,
- * but other SKB queues are used from within IRQ
- * context. Therefore, this one needs a different
- * locking class so our direct, non-irq-safe use of
- * the queue's lock doesn't throw lockdep warnings.
- */
- skb_queue_head_init_class(&local->rx_skb_queue,
- &ieee80211_rx_skb_queue_class);
-
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
INIT_WORK(&local->restart_work, ieee80211_restart_work);
@@ -1089,7 +1078,6 @@
wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
- skb_queue_purge(&local->rx_skb_queue);
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c98be05..b5f1bba 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -668,9 +668,9 @@
static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
- int index)
+ int index,
+ struct sk_buff_head *frames)
{
- struct ieee80211_local *local = sdata->local;
struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
struct ieee80211_rx_status *status;
@@ -684,7 +684,7 @@
tid_agg_rx->reorder_buf[index] = NULL;
status = IEEE80211_SKB_RXCB(skb);
status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
- skb_queue_tail(&local->rx_skb_queue, skb);
+ __skb_queue_tail(frames, skb);
no_frame:
tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -692,7 +692,8 @@
static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
- u16 head_seq_num)
+ u16 head_seq_num,
+ struct sk_buff_head *frames)
{
int index;
@@ -701,7 +702,8 @@
while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
- ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
+ frames);
}
}
@@ -717,7 +719,8 @@
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
- struct tid_ampdu_rx *tid_agg_rx)
+ struct tid_ampdu_rx *tid_agg_rx,
+ struct sk_buff_head *frames)
{
int index, j;
@@ -746,7 +749,8 @@
ht_dbg_ratelimited(sdata,
"release an RX reorder frame due to timeout on earlier frames\n");
- ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
+ frames);
/*
* Increment the head seq# also for the skipped slots.
@@ -756,7 +760,8 @@
skipped = 0;
}
} else while (tid_agg_rx->reorder_buf[index]) {
- ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
+ frames);
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
}
@@ -788,7 +793,8 @@
*/
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct sk_buff_head *frames)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -816,7 +822,7 @@
head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
/* release stored frames up to new head to stack */
ieee80211_release_reorder_frames(sdata, tid_agg_rx,
- head_seq_num);
+ head_seq_num, frames);
}
/* Now the new frame is always in the range of the reordering buffer */
@@ -846,7 +852,7 @@
tid_agg_rx->reorder_buf[index] = skb;
tid_agg_rx->reorder_time[index] = jiffies;
tid_agg_rx->stored_mpdu_num++;
- ieee80211_sta_reorder_release(sdata, tid_agg_rx);
+ ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
out:
spin_unlock(&tid_agg_rx->reorder_lock);
@@ -857,7 +863,8 @@
* Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
* true if the MPDU was buffered, false if it should be processed.
*/
-static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+ struct sk_buff_head *frames)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_local *local = rx->local;
@@ -922,11 +929,12 @@
* sure that we cannot get to it any more before doing
* anything with it.
*/
- if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
+ if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
+ frames))
return;
dont_reorder:
- skb_queue_tail(&local->rx_skb_queue, skb);
+ __skb_queue_tail(frames, skb);
}
static ieee80211_rx_result debug_noinline
@@ -2184,7 +2192,7 @@
}
static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
@@ -2223,7 +2231,7 @@
spin_lock(&tid_agg_rx->reorder_lock);
/* release stored frames up to start of BAR */
ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
- start_seq_num);
+ start_seq_num, frames);
spin_unlock(&tid_agg_rx->reorder_lock);
kfree_skb(skb);
@@ -2808,7 +2816,8 @@
}
}
-static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
+ struct sk_buff_head *frames)
{
ieee80211_rx_result res = RX_DROP_MONITOR;
struct sk_buff *skb;
@@ -2820,15 +2829,9 @@
goto rxh_next; \
} while (0);
- spin_lock(&rx->local->rx_skb_queue.lock);
- if (rx->local->running_rx_handler)
- goto unlock;
+ spin_lock_bh(&rx->local->rx_path_lock);
- rx->local->running_rx_handler = true;
-
- while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
- spin_unlock(&rx->local->rx_skb_queue.lock);
-
+ while ((skb = __skb_dequeue(frames))) {
/*
* all the other fields are valid across frames
* that belong to an aMPDU since they are on the
@@ -2849,7 +2852,12 @@
#endif
CALL_RXH(ieee80211_rx_h_amsdu)
CALL_RXH(ieee80211_rx_h_data)
- CALL_RXH(ieee80211_rx_h_ctrl);
+
+ /* special treatment -- needs the queue */
+ res = ieee80211_rx_h_ctrl(rx, frames);
+ if (res != RX_CONTINUE)
+ goto rxh_next;
+
CALL_RXH(ieee80211_rx_h_mgmt_check)
CALL_RXH(ieee80211_rx_h_action)
CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2858,20 +2866,20 @@
rxh_next:
ieee80211_rx_handlers_result(rx, res);
- spin_lock(&rx->local->rx_skb_queue.lock);
+
#undef CALL_RXH
}
- rx->local->running_rx_handler = false;
-
- unlock:
- spin_unlock(&rx->local->rx_skb_queue.lock);
+ spin_unlock_bh(&rx->local->rx_path_lock);
}
static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
{
+ struct sk_buff_head reorder_release;
ieee80211_rx_result res = RX_DROP_MONITOR;
+ __skb_queue_head_init(&reorder_release);
+
#define CALL_RXH(rxh) \
do { \
res = rxh(rx); \
@@ -2881,9 +2889,9 @@
CALL_RXH(ieee80211_rx_h_check)
- ieee80211_rx_reorder_ampdu(rx);
+ ieee80211_rx_reorder_ampdu(rx, &reorder_release);
- ieee80211_rx_handlers(rx);
+ ieee80211_rx_handlers(rx, &reorder_release);
return;
rxh_next:
@@ -2898,6 +2906,7 @@
*/
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
{
+ struct sk_buff_head frames;
struct ieee80211_rx_data rx = {
.sta = sta,
.sdata = sta->sdata,
@@ -2913,11 +2922,13 @@
if (!tid_agg_rx)
return;
+ __skb_queue_head_init(&frames);
+
spin_lock(&tid_agg_rx->reorder_lock);
- ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
+ ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
spin_unlock(&tid_agg_rx->reorder_lock);
- ieee80211_rx_handlers(&rx);
+ ieee80211_rx_handlers(&rx, &frames);
}
/* main receive path */