@@ -2490,10 +2490,22 @@ static void rcu_do_batch(struct rcu_data *rdp)
 		 * Stop only if limit reached and CPU has something to do.
 		 * Note: The rcl structure counts down from zero.
 		 */
-		if (-rcl.len >= bl && !offloaded &&
-		    (need_resched() ||
-		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
-			break;
+		if (in_serving_softirq()) {
+			if (-rcl.len >= bl && (need_resched() ||
+			    (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
+				break;
+		} else {
+			local_bh_enable();
+			lockdep_assert_irqs_enabled();
+			cond_resched_tasks_rcu_qs();
+			lockdep_assert_irqs_enabled();
+			local_bh_disable();
+		}
+
+		/*
+		 * Make sure we don't spend too much time here and deprive other
+		 * softirq vectors of CPU cycles.
+		 */
 		if (unlikely(tlimit)) {
 			/* only call local_clock() every 32 callbacks */
 			if (likely((-rcl.len & 31) || local_clock() < tlimit))
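
The hunk above splits the old single exit test in two. As a reading aid, here is a rough, compilable userspace model of the per-callback decision after this change. It is only a sketch: in_softirq, limit_reached, cpu_busy and time_limit_hit are invented stand-ins for in_serving_softirq(), -rcl.len >= bl, the need_resched() / is_idle_task() / rcu_is_callbacks_kthread() test and the tlimit check (whose clock is sampled only every 32 callbacks in the real loop), and the kernel-only calls appear as comments.

/*
 * Rough userspace model of the post-patch loop-exit logic.
 * NOT kernel code: every identifier below is a made-up stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

enum batch_action { KEEP_GOING = 0, STOP = 1 };

static enum batch_action after_callback(bool in_softirq, bool limit_reached,
					bool cpu_busy, bool time_limit_hit)
{
	if (in_softirq) {
		/* Softirq: stop once the batch limit is reached and the
		 * CPU has something better to do. */
		if (limit_reached && cpu_busy)
			return STOP;
	} else {
		/* Kthread: no batch-limit break; instead briefly reopen a
		 * preemption window and report a quiescent state, i.e.
		 * local_bh_enable(); cond_resched_tasks_rcu_qs();
		 * local_bh_disable(); in the real code. */
	}

	/* In this version of the change the time limit applies to both
	 * contexts, so other softirq vectors are not starved. */
	if (time_limit_hit)
		return STOP;

	return KEEP_GOING;
}

int main(void)
{
	/* Softirq path, limit reached, CPU busy: stop (prints 1). */
	printf("%d\n", after_callback(true, true, true, false));
	/* Kthread path: the batch limit alone never stops the loop (prints 0). */
	printf("%d\n", after_callback(false, true, true, false));
	return 0;
}

Read this way, the batch-size break only matters in softirq context, where the handler must get out of the way of other work; a callback kthread instead briefly re-enables softirqs and offers a quiescent state, and the time limit still covers both paths.
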
@@ -2501,14 +2513,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
 			/* Exceeded the time limit, so leave. */
 			break;
 		}
-		if (offloaded) {
-			WARN_ON_ONCE(in_serving_softirq());
-			local_bh_enable();
-			lockdep_assert_irqs_enabled();
-			cond_resched_tasks_rcu_qs();
-			lockdep_assert_irqs_enabled();
-			local_bh_disable();
-		}
 	}
 	local_irq_save(flags);
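
The second hunk then reads as cleanup: with the else branch of the in_serving_softirq() check now running local_bh_enable(), cond_resched_tasks_rcu_qs() and local_bh_disable() for every non-softirq invocation of the loop (the offloaded rcuo kthreads and, with rcutree.use_softirq=0, the rcuc kthreads), the offloaded-only block removed here, along with its WARN_ON_ONCE(in_serving_softirq()) assertion, has nothing left to cover.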