author     Johannes Berg <johannes@sipsolutions.net>    2008-03-31 19:23:03 +0200
committer  John W. Linville <linville@tuxdriver.com>    2008-04-01 17:14:10 -0400
commit     dc6676b7f2c2072ec05254aaca32e99f87a8a417
tree       6caf7e007063f9ae6a16fdcb1912bf72d31237c2   /net/mac80211/sta_info.c
parent     4f6fab472c4c7c21d577f85fabec7628d4a05637
mac80211: sta_info_flush() fixes
When the IBSS code tries to flush the STA list, it does so in an atomic context. Flushing isn't safe there, however, and requires the RTNL, so we need to defer it to a workqueue.

Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
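For illustration only: a caller running in atomic context (as the IBSS code described above does) cannot take the RTNL or sleep, so it has to use the deferred variant this patch introduces. The function below is hypothetical; the real IBSS call site lives in another file and is not part of this diff.

/* Hypothetical caller, for illustration only -- not part of this patch. */
static void example_atomic_flush(struct ieee80211_sub_if_data *sdata)
{
	/*
	 * In atomic context, sta_info_flush() is not allowed: it may sleep
	 * and (with this patch) asserts the RTNL.  Queue the stations for
	 * destruction instead; sta_flush_work will destroy them later
	 * under the RTNL.
	 */
	sta_info_flush_delayed(sdata);
}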
Diffstat (limited to 'net/mac80211/sta_info.c')
-rw-r--r--   net/mac80211/sta_info.c   70
1 files changed, 70 insertions, 0 deletions
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index dfca96e05d69..f5c65e891288 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -644,10 +644,41 @@ static void sta_info_debugfs_add_work(struct work_struct *work)
}
#endif
+void __ieee80211_run_pending_flush(struct ieee80211_local *local)
+{
+ struct sta_info *sta;
+ unsigned long flags;
+
+ ASSERT_RTNL();
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ while (!list_empty(&local->sta_flush_list)) {
+ sta = list_first_entry(&local->sta_flush_list,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ sta_info_destroy(sta);
+ spin_lock_irqsave(&local->sta_lock, flags);
+ }
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+}
+
+static void ieee80211_sta_flush_work(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, sta_flush_work);
+
+ rtnl_lock();
+ __ieee80211_run_pending_flush(local);
+ rtnl_unlock();
+}
+
void sta_info_init(struct ieee80211_local *local)
{
spin_lock_init(&local->sta_lock);
INIT_LIST_HEAD(&local->sta_list);
+ INIT_LIST_HEAD(&local->sta_flush_list);
+ INIT_WORK(&local->sta_flush_work, ieee80211_sta_flush_work);
setup_timer(&local->sta_cleanup, sta_info_cleanup,
(unsigned long)local);
@@ -668,7 +699,12 @@ int sta_info_start(struct ieee80211_local *local)
void sta_info_stop(struct ieee80211_local *local)
{
del_timer(&local->sta_cleanup);
+ cancel_work_sync(&local->sta_flush_work);
+
+ rtnl_lock();
sta_info_flush(local, NULL);
+ __ieee80211_run_pending_flush(local);
+ rtnl_unlock();
}
/**
@@ -688,6 +724,7 @@ int sta_info_flush(struct ieee80211_local *local,
unsigned long flags;
might_sleep();
+ ASSERT_RTNL();
spin_lock_irqsave(&local->sta_lock, flags);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
@@ -706,3 +743,36 @@ int sta_info_flush(struct ieee80211_local *local,
return ret;
}
+
+/**
+ * sta_info_flush_delayed - flush matching STA entries from the STA table
+ *
+ * This function unlinks all stations for a given interface and queues
+ * them for freeing. Note that the workqueue function scheduled here has
+ * to run before any new keys can be added to the system to avoid set_key()
+ * callback ordering issues.
+ *
+ * @sdata: the interface
+ */
+void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta, *tmp;
+ unsigned long flags;
+ bool work = false;
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+ if (sdata == sta->sdata) {
+ __sta_info_unlink(&sta);
+ if (sta) {
+ list_add_tail(&sta->list,
+ &local->sta_flush_list);
+ work = true;
+ }
+ }
+ }
+ if (work)
+ schedule_work(&local->sta_flush_work);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+}
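
As a usage note on the ordering requirement in the kerneldoc above: a process-context path that is about to install new keys should drain the flush list first, under the RTNL, so that queued sta_info_destroy() calls cannot be reordered against fresh set_key() driver callbacks. A minimal sketch with the hypothetical helper name example_prepare_new_keys():

/* Hypothetical caller, for illustration only -- not part of this patch. */
static void example_prepare_new_keys(struct ieee80211_local *local)
{
	rtnl_lock();
	/*
	 * Destroy anything queued by sta_info_flush_delayed() before new
	 * keys are added, so the driver sees the old stations torn down
	 * before any new set_key() calls arrive.
	 */
	__ieee80211_run_pending_flush(local);

	/* ... new keys may be installed from here on ... */

	rtnl_unlock();
}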