author     Paul E. McKenney <paulmck@linux.ibm.com>    2019-03-26 10:22:22 -0700
committer  Paul E. McKenney <paulmck@linux.ibm.com>    2019-03-26 14:38:38 -0700
commit     add0d37b4f1e77de7d170ece43c8d765572a1eab (patch)
tree       7f0407007feaba2e006c838e8728386e4de41cc3 /kernel/rcu
parent     f1a98045abd824df833354b309b5fa64ff95f792 (diff)
rcu: Correct READ_ONCE()/WRITE_ONCE() for ->rcu_read_unlock_special
The task_struct structure's ->rcu_read_unlock_special field is only ever read
or written by the owning task, but it is accessed both at process and
interrupt levels.  It may therefore be accessed using plain reads and writes
while interrupts are disabled, but must be accessed using READ_ONCE() and
WRITE_ONCE() or better otherwise.  This commit makes a few adjustments to
align with this discipline.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
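As an aside, the access rule described above can be sketched in a few lines of
kernel-style C.  This sketch is illustrative only and not part of the patch:
the struct, field, and function names are made up, and only READ_ONCE() and
WRITE_ONCE() are the real kernel macros (from <linux/compiler.h>, with bool
from <linux/types.h>).

	/* Illustrative sketch only -- names are hypothetical, not from this patch. */
	struct unlock_flags {
		bool need_qs;	/* stands in for ->rcu_read_unlock_special.b.need_qs */
	};

	/* Caller has interrupts disabled: the interrupt handler that also
	 * touches this per-task field cannot run on this CPU, so a plain
	 * store is enough. */
	static void flag_store_irqs_off(struct unlock_flags *f)
	{
		f->need_qs = true;
	}

	/* Caller may have interrupts enabled: an interrupt handler on this
	 * CPU can access the field between any two instructions, so mark the
	 * store with WRITE_ONCE() to keep the compiler from tearing, fusing,
	 * or otherwise transforming the access. */
	static void flag_store_irqs_on(struct unlock_flags *f)
	{
		WRITE_ONCE(f->need_qs, true);
	}

	/* An interrupt-level reader of the same field uses READ_ONCE() for
	 * the same reason. */
	static bool flag_read_from_irq(struct unlock_flags *f)
	{
		return READ_ONCE(f->need_qs);
	}

Read in that light, the first hunk below presumably drops WRITE_ONCE() because
the store is made while holding the rcu_node lock acquired with
raw_spin_lock_irqsave_rcu_node() (interrupts disabled), while the remaining
hunks add WRITE_ONCE() on paths that may run with interrupts enabled.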
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree_exp.h     2
-rw-r--r--  kernel/rcu/tree_plugin.h  4
2 files changed, 3 insertions, 3 deletions
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index ec4fb93a5dbe..1ee0782213b8 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -633,7 +633,7 @@ static void rcu_exp_handler(void *unused)
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmask & rdp->grpmask) {
 			rdp->deferred_qs = true;
-			WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
+			t->rcu_read_unlock_special.b.exp_hint = true;
 		}
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 81d3cd821891..6ddb3c05e88f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -285,7 +285,7 @@ static void rcu_qs(void)
TPS("cpuqs"));
__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
- current->rcu_read_unlock_special.b.need_qs = false;
+ WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
}
}
@@ -817,7 +817,7 @@ void exit_rcu(void)
 	if (unlikely(!list_empty(&current->rcu_node_entry))) {
 		t->rcu_read_lock_nesting = 1;
 		barrier();
-		t->rcu_read_unlock_special.b.blocked = true;
+		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
 	} else if (unlikely(t->rcu_read_lock_nesting)) {
 		t->rcu_read_lock_nesting = 1;
 	} else {