| Message ID | 1311121103-16978-4-git-send-email-paulmck@linux.vnet.ibm.com |
|---|---|
| State | New |
On Tue, 2011-07-19 at 17:18 -0700, Paul E. McKenney wrote:
> @@ -391,10 +400,15 @@ void __rcu_read_unlock(void)
>  	struct task_struct *t = current;
>  
>  	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
> -	if (--t->rcu_read_lock_nesting == 0) {
> -		barrier();  /* decr before ->rcu_read_unlock_special load */
> +	if (t->rcu_read_lock_nesting != 1)
> +		--t->rcu_read_lock_nesting;
> +	else {
> +		t->rcu_read_lock_nesting = INT_MIN;
> +		barrier();  /* assign before ->rcu_read_unlock_special load */
>  		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
>  			rcu_read_unlock_special(t);
> +		barrier();  /* ->rcu_read_unlock_special load before assign */
> +		t->rcu_read_lock_nesting = 0;
>  	}
>  #ifdef CONFIG_PROVE_LOCKING
>  	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);

But won't the above change make that WARN_ON_ONCE() invalid?
On Wed, Jul 20, 2011 at 02:54:27PM +0200, Peter Zijlstra wrote:
> On Tue, 2011-07-19 at 17:18 -0700, Paul E. McKenney wrote:
> > @@ -391,10 +400,15 @@ void __rcu_read_unlock(void)
> >  	struct task_struct *t = current;
> >  
> >  	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
> > -	if (--t->rcu_read_lock_nesting == 0) {
> > -		barrier();  /* decr before ->rcu_read_unlock_special load */
> > +	if (t->rcu_read_lock_nesting != 1)
> > +		--t->rcu_read_lock_nesting;
> > +	else {
> > +		t->rcu_read_lock_nesting = INT_MIN;
> > +		barrier();  /* assign before ->rcu_read_unlock_special load */
> >  		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
> >  			rcu_read_unlock_special(t);
> > +		barrier();  /* ->rcu_read_unlock_special load before assign */
> > +		t->rcu_read_lock_nesting = 0;
> >  	}
> >  #ifdef CONFIG_PROVE_LOCKING
> >  	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
> 
> But won't the above change make that WARN_ON_ONCE() invalid?

Yes, please see the patch I just sent.  So that warning was spurious, and
if that was the only problem...

							Thanx, Paul
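To make the concern concrete, below is a minimal userland sketch, not kernel code, of one way the old sanity check can now trip without any real underflow: an irq handler that itself uses RCU while the outermost unlock has parked the counter at INT_MIN finishes its own unlock with a negative count. The mock_* helpers and the printf standing in for WARN_ON_ONCE() are illustrative assumptions, not anything from the kernel tree.

/*
 * Userland mock, not kernel code: mock_* names and the printf stand-in for
 * WARN_ON_ONCE() are assumptions for illustration only.
 */
#include <limits.h>
#include <stdio.h>

static int rcu_read_lock_nesting;	/* stand-in for t->rcu_read_lock_nesting */

static void mock_rcu_read_lock(void)
{
	rcu_read_lock_nesting++;
}

static void mock_rcu_read_unlock(void)
{
	if (rcu_read_lock_nesting != 1) {
		--rcu_read_lock_nesting;
	} else {
		rcu_read_lock_nesting = INT_MIN;	/* marker value */
		/* ... ->rcu_read_unlock_special would be handled here ... */
		rcu_read_lock_nesting = 0;
	}
	/* The old check: a negative count used to mean underflow. */
	if (rcu_read_lock_nesting < 0)
		printf("WARN_ON_ONCE would fire: nesting = %d\n",
		       rcu_read_lock_nesting);
}

/* An irq handler using RCU while the outermost unlock is parked at INT_MIN. */
static void mock_irq_handler(void)
{
	mock_rcu_read_lock();	/* INT_MIN -> INT_MIN + 1 */
	mock_rcu_read_unlock();	/* back to INT_MIN: negative, but no underflow */
}

int main(void)
{
	mock_rcu_read_lock();		/* outermost reader, nesting = 1 */
	rcu_read_lock_nesting = INT_MIN;/* outer unlock interrupted right here */
	mock_irq_handler();		/* its unlock observes a negative count */
	rcu_read_lock_nesting = 0;	/* outer unlock resumes and finishes */
	return 0;
}

This only illustrates why the existing check can no longer distinguish the INT_MIN window from genuine underflow; the actual adjustment is in the follow-up patch Paul refers to, which is not shown here.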
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 9439a25..ad4539a 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
-	if (t->rcu_read_lock_nesting &&
+	if (t->rcu_read_lock_nesting > 0 &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
 			rnp->gp_tasks = &t->rcu_node_entry;
 		}
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	} else if (t->rcu_read_lock_nesting < 0 &&
+		   t->rcu_read_unlock_special) {
+
+		/*
+		 * Complete exit from RCU read-side critical section on
+		 * behalf of preempted instance of __rcu_read_unlock().
+		 */
+		rcu_read_unlock_special(t);
 	}
 
 	/*
@@ -391,10 +400,15 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-	if (--t->rcu_read_lock_nesting == 0) {
-		barrier();  /* decr before ->rcu_read_unlock_special load */
+	if (t->rcu_read_lock_nesting != 1)
+		--t->rcu_read_lock_nesting;
+	else {
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier();  /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
 			rcu_read_unlock_special(t);
+		barrier();  /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
 	}
 #ifdef CONFIG_PROVE_LOCKING
 	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
@@ -593,7 +607,8 @@ static void rcu_preempt_check_callbacks(int cpu)
 		rcu_preempt_qs(cpu);
 		return;
 	}
-	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+	if (t->rcu_read_lock_nesting > 0 &&
+	    per_cpu(rcu_preempt_data, cpu).qs_pending)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
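The backstop the patch adds can be read directly off the rcu_preempt_note_context_switch() hunk: if the outermost __rcu_read_unlock() is interrupted after parking ->rcu_read_lock_nesting at INT_MIN but before it processes ->rcu_read_unlock_special, and the interrupt ends up in the scheduler, the context-switch hook now notices nesting < 0 and finishes the unlock on behalf of the interrupted instance. The following userland sketch mimics that interleaving; the mock_* names, the 0x1 "blocked" bit, and the hand-rolled sequence in main() are assumptions for illustration, not kernel code.

/*
 * Userland sketch, not kernel code: mock_* names, the 0x1 "blocked" bit,
 * and the interleaving hand-rolled in main() are illustrative assumptions.
 */
#include <limits.h>
#include <stdio.h>

static int mock_nesting;		/* t->rcu_read_lock_nesting stand-in */
static unsigned int mock_special;	/* t->rcu_read_unlock_special stand-in */

static void mock_unlock_special(void)
{
	/* Would dequeue the task from blocked lists, report the QS, etc. */
	printf("unlock_special: clearing special state 0x%x\n", mock_special);
	mock_special = 0;
}

/* Mirrors the new "else if (nesting < 0 && special)" branch in the patch. */
static void mock_note_context_switch(void)
{
	if (mock_nesting > 0 && !(mock_special & 0x1)) {
		mock_special |= 0x1;	/* "blocked" bit stand-in */
		printf("context switch inside a reader: queue the task\n");
	} else if (mock_nesting < 0 && mock_special) {
		/* Finish the unlock on behalf of the interrupted instance. */
		mock_unlock_special();
	}
}

int main(void)
{
	/* Outermost reader was preempted earlier, so "blocked" is set. */
	mock_nesting = 1;
	mock_special = 0x1;

	/* __rcu_read_unlock() runs as far as the INT_MIN store... */
	mock_nesting = INT_MIN;

	/* ...then an irq handler wakes something and enters the scheduler. */
	mock_note_context_switch();	/* sees nesting < 0, finishes the job */

	/*
	 * The interrupted __rcu_read_unlock() resumes, reloads special,
	 * finds it already cleared, and just restores nesting to zero.
	 */
	if (mock_special)
		mock_unlock_special();
	mock_nesting = 0;
	return 0;
}

The rcu_preempt_check_callbacks() hunk appears to serve the same end: checking nesting > 0 rather than merely nonzero keeps the quiescent-state prodding from being applied to a task that is sitting in this negative-nesting window.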