@@ -162,7 +162,40 @@ static inline void __rcu_read_lock(void)
barrier(); /* Keep code within RCU read-side critical section. */
}
-extern void __rcu_read_unlock(void);
+extern void rcu_read_unlock_do_special(void);
+
+/*
+ * Preemptible RCU implementation for rcu_read_unlock().
+ * Decrement rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_do_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ * Set rcu_read_lock_nesting to a large negative value during cleanup
+ * in order to ensure that if rcu_read_unlock_special is non-zero, then
+ * rcu_read_lock_nesting is also non-zero.
+ */
+static inline void __rcu_read_unlock(void)
+{
+ if (__this_cpu_read(rcu_read_lock_nesting) != 1)
+ __this_cpu_dec(rcu_read_lock_nesting);
+ else {
+ barrier(); /* critical section before exit code. */
+ __this_cpu_write(rcu_read_lock_nesting, INT_MIN);
+ barrier(); /* assign before ->rcu_read_unlock_special load */
+ if (unlikely(__this_cpu_read(rcu_read_unlock_special)))
+ rcu_read_unlock_do_special();
+ barrier(); /* ->rcu_read_unlock_special load before assign */
+ __this_cpu_write(rcu_read_lock_nesting, 0); /* Rely on barrier() above. */
+ }
+#ifdef CONFIG_PROVE_LOCKING
+ {
+ int rln = __this_cpu_read(rcu_read_lock_nesting);
+
+ WARN_ON_ONCE(rln < 0 && rln > INT_MIN / 2);
+ }
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+
void synchronize_rcu(void);
/*
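
The INT_MIN write above can be read as maintaining one invariant for any code that samples both per-CPU fields concurrently, for example from an interrupt handler: whenever rcu_read_unlock_special is non-zero, rcu_read_lock_nesting is also non-zero, either because the reader is still nested or because it is parked at the large negative sentinel during cleanup. A minimal sketch of a checker relying on that invariant is shown below; rcu_reader_needs_special() is a hypothetical helper invented here for illustration and is not part of this patch.

static inline bool rcu_reader_needs_special(void)	/* hypothetical sketch */
{
	/*
	 * Because __rcu_read_unlock() parks rcu_read_lock_nesting at
	 * INT_MIN before it inspects rcu_read_unlock_special, a non-zero
	 * ->special value is never observed together with a zero nesting
	 * count, so the nesting test below suffices to detect an
	 * in-progress read-side critical section or its cleanup.
	 */
	return __this_cpu_read(rcu_read_lock_nesting) != 0 &&
	       __this_cpu_read(rcu_read_unlock_special) != 0;
}
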
@@ -109,8 +109,4 @@ static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
}
}
-#ifdef CONFIG_PREEMPT_RCU
-extern void rcu_read_unlock_do_special(void);
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-
#endif /* __LINUX_RCU_H */
@@ -59,39 +59,6 @@ DEFINE_PER_CPU(struct task_struct *, rcu_current_task);
#endif /* #ifdef CONFIG_PROVE_RCU */
/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_do_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- * Set rcu_read_lock_nesting to a large negative value during cleanup
- * in order to ensure that if rcu_read_unlock_special is non-zero, then
- * rcu_read_lock_nesting is also non-zero.
- */
-void __rcu_read_unlock(void)
-{
- if (__this_cpu_read(rcu_read_lock_nesting) != 1)
- __this_cpu_dec(rcu_read_lock_nesting);
- else {
- barrier(); /* critical section before exit code. */
- __this_cpu_write(rcu_read_lock_nesting, INT_MIN);
- barrier(); /* assign before ->rcu_read_unlock_special load */
- if (unlikely(__this_cpu_read(rcu_read_unlock_special)))
- rcu_read_unlock_do_special();
- barrier(); /* ->rcu_read_unlock_special load before assign */
- __this_cpu_write(rcu_read_lock_nesting, 0);
- }
-#ifdef CONFIG_PROVE_LOCKING
- {
- int rln = __this_cpu_read(rcu_read_lock_nesting);
-
- WARN_ON_ONCE(rln < 0 && rln > INT_MIN / 2);
- }
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
-/*
* Check for a task exiting while in a preemptible-RCU read-side
* critical section, clean up if so. No need to issue warnings,
* as debug_check_no_locks_held() already does this if lockdep
@@ -598,6 +598,7 @@ void rcu_read_unlock_do_special(void)
}
local_irq_restore(flags);
}
+EXPORT_SYMBOL_GPL(rcu_read_unlock_do_special);
/*
* Check for a quiescent state from the current CPU. When a task blocks,
@@ -409,6 +409,7 @@ void rcu_read_unlock_do_special(void)
local_irq_restore(flags);
}
}
+EXPORT_SYMBOL_GPL(rcu_read_unlock_do_special);
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
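
The two EXPORT_SYMBOL_GPL() additions are what make the inlining safe for modular code: since __rcu_read_unlock() is now expanded at every rcu_read_unlock() call site, a module's own text may branch to rcu_read_unlock_do_special() directly. A hedged sketch of such a caller follows; the module below is hypothetical and only shows the call pattern, it is not part of this patch.

#include <linux/module.h>
#include <linux/rcupdate.h>

static int __init rcu_unlock_demo_init(void)
{
	rcu_read_lock();
	/*
	 * If this task was preempted inside the critical section, the
	 * inlined __rcu_read_unlock() below calls
	 * rcu_read_unlock_do_special(), which therefore must be exported.
	 */
	rcu_read_unlock();
	return 0;
}

static void __exit rcu_unlock_demo_exit(void)
{
}

module_init(rcu_unlock_demo_init);
module_exit(rcu_unlock_demo_exit);
MODULE_LICENSE("GPL");
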