path: root/kernel/rcutree_plugin.h
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2009-08-27 15:00:12 -0700
committer  Ingo Molnar <mingo@elte.hu>  2009-08-29 15:34:40 +0200
commit     868489660dabc0c28087cca3dbc1adbbc398c6fe (patch)
tree       cf991ec94ce29bccfef27213107748810c51a1ca /kernel/rcutree_plugin.h
parent     dd5d19bafd90d33043a4a14b2e2d98612caa293c (diff)
download   lwn-868489660dabc0c28087cca3dbc1adbbc398c6fe.tar.gz
download   lwn-868489660dabc0c28087cca3dbc1adbbc398c6fe.zip
rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
Changes suggested by review comments from Josh Triplett and Mathieu Desnoyers.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Josh Triplett <josh@joshtriplett.org>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
LKML-Reference: <20090827220012.GA30525@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
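The casts removed below were never required: in C, any object pointer converts implicitly to and from void *, so both the (void *) on the store and the (struct rcu_node *) on the loads were redundant. A minimal standalone sketch of that point, using made-up stand-in structures rather than the real struct task_struct and struct rcu_node definitions:

    #include <stdio.h>

    /* Made-up stand-in structures, for illustration only; not the kernel's. */
    struct rcu_node { int qsmask; };
    struct task { void *rcu_blocked_node; };

    int main(void)
    {
            struct rcu_node node = { .qsmask = 0 };
            struct task t;
            struct rcu_node *rnp;

            /* Storing a typed pointer into a void * member needs no (void *) cast. */
            t.rcu_blocked_node = &node;

            /* Reading it back into a typed pointer needs no cast either, in C. */
            rnp = t.rcu_blocked_node;

            printf("qsmask = %d\n", rnp->qsmask);
            return 0;
    }

The WARN_ONCE(1, ...) added in the last hunk reports the "shouldn't happen" case the first time it is reached (message plus backtrace) instead of returning silently; subsequent hits stay quiet.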
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 04343bee646d..47789369ea59 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -92,7 +92,7 @@ static void rcu_preempt_qs(int cpu)
                 rnp = rdp->mynode;
                 spin_lock(&rnp->lock);
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
-                t->rcu_blocked_node = (void *)rnp;
+                t->rcu_blocked_node = rnp;
 
                 /*
                  * If this CPU has already checked in, then this task
@@ -176,9 +176,9 @@ static void rcu_read_unlock_special(struct task_struct *t)
                  * most one time. So at most two passes through loop.
                  */
                 for (;;) {
-                        rnp = (struct rcu_node *)t->rcu_blocked_node;
+                        rnp = t->rcu_blocked_node;
                         spin_lock(&rnp->lock);
-                        if (rnp == (struct rcu_node *)t->rcu_blocked_node)
+                        if (rnp == t->rcu_blocked_node)
                                 break;
                         spin_unlock(&rnp->lock);
                 }
@@ -288,8 +288,10 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
         struct rcu_node *rnp_root = rcu_get_root(rsp);
         struct task_struct *tp;
 
-        if (rnp == rnp_root)
+        if (rnp == rnp_root) {
+                WARN_ONCE(1, "Last CPU thought to be offlined?");
                 return; /* Shouldn't happen: at least one CPU online. */
+        }
 
         /*
          * Move tasks up to root rcu_node. Rely on the fact that the