| author | Thomas Gleixner <tglx@linutronix.de> | 2009-10-04 16:58:07 +0200 |
| --- | --- | --- |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-10-04 17:44:48 +0200 |
| commit | 00ef66ebb37437ca205d06224e7b956d205f7886 (patch) | |
| tree | 40ebb5d10340e774c764afac58c0077a3721c158 /include/linux/netfilter | |
| parent | 82c07cbbdf7d44564497122eb6d984ffe2497fa3 (diff) | |
net: Fix netfilter percpu assumptions for real
commit 21ece08 (net: fix the xtables smp_processor_id assumptions for -rt) fixed only half of the problem. The filter functions might run in thread context and can be preempted and migrated on -RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
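
In other words: on -rt, local_bh_disable() no longer implies disabled preemption, so computing the per-cpu lock with __raw_get_cpu_var() at lock time and again at unlock time can resolve to two different CPUs if the task migrates in between. An illustrative timeline of that hazard (a sketch, not from the commit; names follow the pre-patch helpers):

```c
/*
 * Hazard sketch under CONFIG_PREEMPT_RT, pre-patch helpers:
 *
 *	task on CPU0:
 *	  xt_info_rdlock_bh()
 *	    local_bh_disable();          // on -rt this stays preemptible
 *	    lock = CPU0's xt_info_lock;
 *	    lock->readers++;             // first reader: spin_lock()
 *
 *	... task preempted and migrated to CPU1 ...
 *
 *	task on CPU1:
 *	  xt_info_rdunlock_bh()
 *	    lock = CPU1's xt_info_lock;  // wrong lock!
 *	    --lock->readers;             // underflows CPU1's count;
 *	                                 // CPU0's lock is never dropped
 */
```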
Diffstat (limited to 'include/linux/netfilter')
-rw-r--r-- | include/linux/netfilter/x_tables.h | 25
1 file changed, 19 insertions(+), 6 deletions(-)
```diff
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index f7ab3f917048..a3f5427947b8 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -468,22 +468,35 @@ DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
  * _Only_ that special combination of being per-cpu and never getting
  * re-entered asynchronously means that the count is safe.
  */
-static inline void xt_info_rdlock_bh(void)
+static inline int xt_info_rdlock_bh(void)
 {
 	struct xt_info_lock *lock;
+	int cpu;
 
 	local_bh_disable();
-	lock = &__raw_get_cpu_var(xt_info_locks);
-	if (likely(!lock->readers++))
+	preempt_disable_rt();
+	cpu = smp_processor_id();
+	lock = &per_cpu(xt_info_locks, cpu);
+	if (likely(!lock->readers++)) {
+		preempt_enable_rt();
 		spin_lock(&lock->lock);
+	} else
+		preempt_enable_rt();
+	return cpu;
 }
 
-static inline void xt_info_rdunlock_bh(void)
+static inline void xt_info_rdunlock_bh(int cpu)
 {
-	struct xt_info_lock *lock = &__raw_get_cpu_var(xt_info_locks);
+	struct xt_info_lock *lock = &per_cpu(xt_info_locks, cpu);
 
-	if (likely(!--lock->readers))
+	preempt_disable_rt();
+
+	if (likely(!--lock->readers)) {
+		preempt_enable_rt();
 		spin_unlock(&lock->lock);
+	} else
+		preempt_enable_rt();
+
 	local_bh_enable();
 }
 
```
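
The signature change ripples out to the callers: xt_info_rdlock_bh() now reports which CPU's lock was taken, and that value has to be handed back to xt_info_rdunlock_bh(). A minimal usage sketch (example_do_table() and example_walk_rules() are hypothetical stand-ins for the real table walkers such as ipt_do_table()):

```c
static unsigned int example_do_table(struct sk_buff *skb)
{
	unsigned int verdict;
	int cpu;

	/*
	 * Take the per-cpu reader lock and remember *which* CPU's
	 * lock we hold; on -rt we may migrate before the unlock.
	 */
	cpu = xt_info_rdlock_bh();

	verdict = example_walk_rules(skb);	/* hypothetical rule walk */

	/*
	 * Release the lock we actually took, not the lock of
	 * whichever CPU we happen to be running on by now.
	 */
	xt_info_rdunlock_bh(cpu);

	return verdict;
}
```

Note that the preempt_disable_rt()/preempt_enable_rt() pair inside the helpers only needs to cover the smp_processor_id() read and the readers++/-- update: once the CPU number is captured and returned, later migration is harmless, because the lock pointer is derived from the saved cpu rather than recomputed from the current CPU.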