author		Vlad Buslov <vladbu@mellanox.com>	2018-09-03 10:09:20 +0300
committer	David S. Miller <davem@davemloft.net>	2018-09-08 10:18:25 -0700
commit		f20a4d01175ad681a88d571c6391d188453ca6d0 (patch)
tree		d2f8e7d03b7aa532e494ccd9fdabc106de3c7bac /net/sched/act_nat.c
parent		6d7a8df6dfe4d62335673fb15407d79180a33ea2 (diff)
net: sched: act_nat: remove dependency on rtnl lock
According to the new locking rule, we have to take tcf_lock for both
->init() and ->dump(), as RTNL will be removed. Use tcf spinlock to
protect private nat action data from concurrent modification during
dump. (nat init already uses tcf spinlock when changing action state)

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
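The pattern the patch introduces can be illustrated outside the kernel: snapshot and serialize the shared fields while holding a per-object lock, and drop the lock on both the success and the error path. The userspace sketch below is only an illustration of that idea, not the kernel code itself; every name in it (nat_state, dump_nat, emit) is invented for the example, and a pthread mutex stands in for the kernel's tcf spinlock.

/*
 * Minimal userspace sketch of the lock-around-dump pattern.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct nat_state {
	pthread_mutex_t lock;	/* stands in for p->tcf_lock */
	uint32_t old_addr;
	uint32_t new_addr;
	uint32_t mask;
	uint32_t flags;
	int action;
};

/* Serialize one field; returns -1 to model an nla_put() failure. */
static int emit(FILE *out, const char *name, uint32_t val)
{
	return fprintf(out, "%s=%u\n", name, val) < 0 ? -1 : 0;
}

static int dump_nat(FILE *out, struct nat_state *p)
{
	/* Hold the lock while the fields are read and written out, so a
	 * concurrent writer cannot change them mid-dump.
	 */
	pthread_mutex_lock(&p->lock);

	if (emit(out, "old_addr", p->old_addr) ||
	    emit(out, "new_addr", p->new_addr) ||
	    emit(out, "mask", p->mask) ||
	    emit(out, "flags", p->flags) ||
	    emit(out, "action", (uint32_t)p->action))
		goto put_failure;

	pthread_mutex_unlock(&p->lock);
	return 0;

put_failure:
	/* The error path must drop the lock too, just as the patch does. */
	pthread_mutex_unlock(&p->lock);
	return -1;
}

int main(void)
{
	struct nat_state p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.old_addr = 0x0a000001, .new_addr = 0xc0a80001,
		.mask = 0xffffffff, .flags = 0, .action = 0,
	};

	return dump_nat(stdout, &p) ? 1 : 0;
}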
Diffstat (limited to 'net/sched/act_nat.c')
-rw-r--r--	net/sched/act_nat.c	15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d98f33fdffe2..c5c1e23add77 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -256,28 +256,31 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_nat *p = to_tcf_nat(a);
 	struct tc_nat opt = {
-		.old_addr = p->old_addr,
-		.new_addr = p->new_addr,
-		.mask     = p->mask,
-		.flags    = p->flags,
-
 		.index    = p->tcf_index,
-		.action   = p->tcf_action,
 		.refcnt   = refcount_read(&p->tcf_refcnt) - ref,
 		.bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
 	};
 	struct tcf_t t;
 
+	spin_lock_bh(&p->tcf_lock);
+	opt.old_addr = p->old_addr;
+	opt.new_addr = p->new_addr;
+	opt.mask = p->mask;
+	opt.flags = p->flags;
+	opt.action = p->tcf_action;
+
 	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
 	tcf_tm_dump(&t, &p->tcf_tm);
 	if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
 		goto nla_put_failure;
+	spin_unlock_bh(&p->tcf_lock);
 
 	return skb->len;
 
 nla_put_failure:
+	spin_unlock_bh(&p->tcf_lock);
 	nlmsg_trim(skb, b);
 	return -1;
 }
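For reference, below is a sketch of tcf_nat_dump() as it reads with the patch applied, assembled from the hunk above plus its unchanged context lines; the two-line function signature is assumed from the hunk header, so treat this as a reconstruction rather than a verbatim copy of the tree. The spin_lock_bh()/spin_unlock_bh() pair brackets both the field snapshot and the netlink puts, and the nla_put_failure path drops the lock as well; the _bh variant is used since tcf_lock is also taken on the packet-processing path, which can run in softirq context.

static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = to_tcf_nat(a);
	struct tc_nat opt = {
		.index    = p->tcf_index,
		.refcnt   = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	/* Take the per-action lock so a concurrent ->init() cannot change
	 * the nat parameters while they are copied and serialized.
	 */
	spin_lock_bh(&p->tcf_lock);
	opt.old_addr = p->old_addr;
	opt.new_addr = p->new_addr;
	opt.mask = p->mask;
	opt.flags = p->flags;
	opt.action = p->tcf_action;

	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}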