author	John Fastabend <john.fastabend@gmail.com>	2014-09-28 11:52:56 -0700
committer	David S. Miller <davem@davemloft.net>	2014-09-30 01:02:26 -0400
commit	22e0f8b9322cb1a48b1357e8f4ae6f5a9eca8cfa (patch)
tree	2c9ef18dca9d9a441d92ea57cf7f7a292f4ceb3f /net/sched/sch_mqprio.c
parent	79cf79abce71eb7dbc40e2f3121048ca5405cb47 (diff)
net: sched: make bstats per cpu and estimator RCU safe
In order to run qdiscs without locking, statistics and estimators need to be handled correctly. To resolve this for bstats, make the statistics per cpu. Because this is only needed for qdiscs that run without locks, which will not be the case for most qdiscs in the near future, only create per-cpu stats when the qdisc sets the TCQ_F_CPUSTATS flag. Next, because estimators use the bstats to calculate packets per second and bytes per second, the estimator code paths are updated to use the per-cpu statistics.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
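For illustration only, the sketch below shows what per-cpu byte/packet counters and their aggregation at dump time might look like. The identifiers (my_bstats, my_bstats_cpu, my_fold_bstats) are hypothetical and not the names used by this patch; the pattern of per-cpu counters folded under a u64_stats_sync sequence is the general technique the commit message describes.

	#include <linux/types.h>
	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/u64_stats_sync.h>

	/* Hypothetical single-counter snapshot, as produced for a stats dump. */
	struct my_bstats {
		u64	bytes;
		u32	packets;
	};

	/* Hypothetical per-cpu counter: updated locklessly on the fast path. */
	struct my_bstats_cpu {
		struct my_bstats	bstats;
		struct u64_stats_sync	syncp;
	};

	/* Fold every CPU's counters into one snapshot before reporting. */
	static void my_fold_bstats(struct my_bstats *sum,
				   struct my_bstats_cpu __percpu *cpu)
	{
		int i;

		for_each_possible_cpu(i) {
			struct my_bstats_cpu *bcpu = per_cpu_ptr(cpu, i);
			unsigned int start;
			u64 bytes;
			u32 packets;

			/* Read a consistent bytes/packets pair for this CPU. */
			do {
				start = u64_stats_fetch_begin(&bcpu->syncp);
				bytes = bcpu->bstats.bytes;
				packets = bcpu->bstats.packets;
			} while (u64_stats_fetch_retry(&bcpu->syncp, start));

			sum->bytes += bytes;
			sum->packets += packets;
		}
	}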
Diffstat (limited to 'net/sched/sch_mqprio.c')
-rw-r--r--	net/sched/sch_mqprio.c	| 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 37e7d25d21f1..8917372fddc6 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -355,7 +355,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
}
/* Reclaim root sleeping lock before completing stats */
spin_lock_bh(d->lock);
- if (gnet_stats_copy_basic(d, &bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
gnet_stats_copy_queue(d, &qstats) < 0)
return -1;
} else {
@@ -363,7 +363,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
sch = dev_queue->qdisc_sleeping;
sch->qstats.qlen = sch->q.qlen;
- if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, &sch->qstats) < 0)
return -1;
}
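The mqprio dump path above passes NULL for the new per-cpu argument because it reads the qdisc's single bstats directly. As a hedged caller-side sketch, a qdisc that does keep per-cpu counters would pass its per-cpu pointer instead; the qdisc_is_percpu_stats() check and the cpu_bstats field here are assumptions about the surrounding API, not part of this diff.

	/* Illustrative only: choose between per-cpu and legacy bstats. */
	if (qdisc_is_percpu_stats(sch))
		/* per-cpu counters are folded inside the stats helper */
		err = gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats);
	else
		/* legacy qdisc with a single bstats: no per-cpu pointer */
		err = gnet_stats_copy_basic(d, NULL, &sch->bstats);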