author    Paul E. McKenney <paulmck@kernel.org>  2020-03-17 11:39:26 -0700
committer Paul E. McKenney <paulmck@kernel.org>  2020-04-27 11:03:52 -0700
commit    b0afa0f056676ffe0a7213818f09d2460adbcc16 (patch)
tree      4886fdc32d67b8b4ce347d07f65b3bb5d95b5709 /kernel/rcu
parent    88092d0c99d7584d50cc8caadb8fa9ff8a1d4ea0 (diff)
rcu-tasks: Provide boot parameter to delay IPIs until late in grace period
This commit provides a rcupdate.rcu_task_ipi_delay kernel boot parameter that specifies how old the RCU tasks trace grace period must be before the grace-period kthread starts sending IPIs. This delay allows more tasks to pass through rcu_tasks_qs() quiescent states, thus reducing (or even eliminating) the number of IPIs that must be sent. On a short rcutorture test, setting this kernel boot parameter to HZ/2 resulted in zero IPIs for all 877 RCU-tasks trace grace periods that elapsed during that test.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
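The message above describes the gating idea (hold off on resched IPIs until the grace period has aged past rcu_task_ipi_delay), while the hunks shown below add the parameter and hoist the rcu_tasks_trace definition earlier in the file. As a rough sketch of that idea only, and not the patch's own logic, such a gate could look like the following, assuming a hypothetical gp_start timestamp recorded in jiffies when the grace period starts:

/*
 * Illustrative sketch only -- not code from this patch.  The gp_start
 * argument is a hypothetical grace-period start time in jiffies;
 * rcu_task_ipi_delay, HZ, and time_after() are real.
 */
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

/* Avoid IPIing CPUs early in the grace period (default HZ/2). */
static int rcu_task_ipi_delay __read_mostly = HZ / 2;
module_param(rcu_task_ipi_delay, int, 0644);

/* True once the grace period is old enough that IPIs are worthwhile. */
static bool rcu_task_ipi_delay_elapsed(unsigned long gp_start)
{
	return time_after(jiffies, gp_start + rcu_task_ipi_delay);
}

Because module_param() is given mode 0644, the value should also be readable and writable at runtime under /sys/module/rcupdate/parameters/, in addition to the rcupdate.rcu_task_ipi_delay= boot-time setting named in the message.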
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tasks.h | 13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 71462cf3d4bd..eeac4a122234 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -74,6 +74,11 @@ static struct rcu_tasks rt_name = \
/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
+/* Avoid IPIing CPUs early in the grace period. */
+#define RCU_TASK_IPI_DELAY (HZ / 2)
+static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
+module_param(rcu_task_ipi_delay, int, 0644);
+
/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
@@ -713,6 +718,10 @@ DECLARE_WAIT_QUEUE_HEAD(trc_wait); // List of holdout tasks.
// Record outstanding IPIs to each CPU. No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
+void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
+DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
+ "RCU Tasks Trace");
+
/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
@@ -998,10 +1007,6 @@ void exit_tasks_rcu_finish_trace(struct task_struct *t)
rcu_read_unlock_trace_special(t);
}
-void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
-DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
- "RCU Tasks Trace");
-
/**
* call_rcu_tasks_trace() - Queue a callback trace task-based grace period
* @rhp: structure to be used for queueing the RCU updates.
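The truncated kernel-doc header above documents call_rcu_tasks_trace(), whose declaration this patch hoists; its signature, void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func), appears in the hunks above. For orientation only, a minimal hypothetical caller might look like this (struct foo, foo_free_cb(), and foo_retire() are invented names; the callback runs after all pre-existing rcu_read_lock_trace() readers have finished):

/* Hypothetical usage sketch of call_rcu_tasks_trace(); not from this patch. */
#include <linux/kernel.h>
#include <linux/rcupdate_trace.h>
#include <linux/slab.h>

struct foo {
	struct rcu_head rh;
	int data;
};

static void foo_free_cb(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);	/* All earlier rcu_read_lock_trace() readers are done. */
}

static void foo_retire(struct foo *fp)
{
	call_rcu_tasks_trace(&fp->rh, foo_free_cb);
}

The pattern mirrors ordinary call_rcu() usage: embed an rcu_head in the structure being retired and let the callback free it once the RCU Tasks Trace grace period has elapsed.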