author | Vlad Buslov <vladbu@mellanox.com> | 2018-05-21 23:03:04 +0300
committer | David S. Miller <davem@davemloft.net> | 2018-05-22 15:34:34 -0400
commit | 290aa0ad74c995c60d94fb4f1d66d411efa13dd5 (patch)
tree | 2d4c82f52352237e4edf0fb96d8e50088b4dc062 /net/sched/act_api.c
parent | 73bf1fc58dc4376d0111a4c1c9eab27e2759f468 (diff)
download | lwn-290aa0ad74c995c60d94fb4f1d66d411efa13dd5.tar.gz lwn-290aa0ad74c995c60d94fb4f1d66d411efa13dd5.zip
net: sched: don't disable bh when accessing action idr
The initial net_device implementation used the ingress_lock spinlock to synchronize
the ingress path of a device. This lock was taken in both process and bh context.
In some code paths the action map lock was obtained while holding ingress_lock.
Commit e1e992e52faa ("[NET_SCHED] protect action config/dump from irqs")
modified the actions to always disable bh while using the action map lock, in
order to prevent a deadlock on ingress_lock in softirq context. The ingress_lock
has since been removed from net_device, so disabling bh while accessing the
action map is no longer necessary.
Replace all action idr spinlock usage with regular calls that do not
disable bh.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
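The rule the commit message relies on is the usual process-vs-softirq lock ordering: a spinlock that can also be taken from bh (softirq) context must be acquired with spin_lock_bh() from process context, otherwise a softirq interrupting the lock holder on the same CPU can try to retake the lock and deadlock. Once no softirq path touches the lock anymore, a plain spin_lock() is enough. A minimal sketch follows; it is not part of this patch, and example_lock, config_path_update() and config_path_update_no_bh() are hypothetical names used only for illustration.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Called only from process context (e.g. netlink/config path). */
static void config_path_update(void)
{
	/*
	 * Needed only while some softirq path can also take example_lock:
	 * disabling bh prevents a softirq on this CPU from interrupting us
	 * while we hold the lock and then spinning on it forever.
	 */
	spin_lock_bh(&example_lock);
	/* ... modify shared state ... */
	spin_unlock_bh(&example_lock);
}

/* Once no softirq path takes the lock, a plain spin_lock() suffices. */
static void config_path_update_no_bh(void)
{
	spin_lock(&example_lock);
	/* ... modify shared state ... */
	spin_unlock(&example_lock);
}

This is exactly the simplification the patch applies to the action idr lock: the softirq user (via ingress_lock) is gone, so the _bh variants buy nothing and only add latency by blocking softirqs on the local CPU.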
Diffstat (limited to 'net/sched/act_api.c')
-rw-r--r-- | net/sched/act_api.c | 20
1 file changed, 10 insertions, 10 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 72251241665a..3f4cf930f809 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -77,9 +77,9 @@ static void free_tcf(struct tc_action *p)
 
 static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
 {
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	idr_remove(&idrinfo->action_idr, p->tcfa_index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	gen_kill_estimator(&p->tcfa_rate_est);
 	free_tcf(p);
 }
@@ -156,7 +156,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	struct tc_action *p;
 	unsigned long id = 1;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 
 	s_i = cb->args[0];
 
@@ -191,7 +191,7 @@ done:
 	if (index >= 0)
 		cb->args[0] = index + 1;
 
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	if (n_i) {
 		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
 			cb->args[1] = n_i;
@@ -261,9 +261,9 @@ static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
 {
 	struct tc_action *p = NULL;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	p = idr_find(&idrinfo->action_idr, index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 
 	return p;
 }
@@ -323,7 +323,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	}
 	spin_lock_init(&p->tcfa_lock);
 	idr_preload(GFP_KERNEL);
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	/* user doesn't specify an index */
 	if (!index) {
 		index = 1;
@@ -331,7 +331,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	} else {
 		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
 	}
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	idr_preload_end();
 	if (err)
 		goto err3;
@@ -369,9 +369,9 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
 {
 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 }
 EXPORT_SYMBOL(tcf_idr_insert);
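The tcf_idr_create() hunks also show the standard IDR allocation idiom that the patch leaves intact: preallocate with idr_preload(GFP_KERNEL) while no lock is held, then perform the actual idr_alloc_u32() with GFP_ATOMIC inside the (now non-bh) spinlock so nothing sleeps in the critical section. A simplified sketch of that idiom follows; it is not code from this patch, and map_lock, map_idr and map_insert() are hypothetical names.

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(map_lock);
static DEFINE_IDR(map_idr);

/* Insert @entry into the map and return its allocated id in @out_id. */
static int map_insert(void *entry, u32 *out_id)
{
	u32 id = 1;
	int err;

	idr_preload(GFP_KERNEL);	/* may sleep; done before taking the lock */
	spin_lock(&map_lock);		/* plain lock: no softirq users of the map */
	err = idr_alloc_u32(&map_idr, entry, &id, UINT_MAX, GFP_ATOMIC);
	spin_unlock(&map_lock);
	idr_preload_end();

	if (!err)
		*out_id = id;
	return err;
}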