@@ -127,33 +127,34 @@ struct task_group;
((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
 
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-# debug_normal_state_change(state_value) \
+# define debug_normal_state_change(state_value) \
do { \
WARN_ON_ONCE(is_special_task_state(state_value)); \
current->task_state_change = _THIS_IP_; \
} while (0)
 
-# debug_special_state_change(state_value) \
+# define debug_special_state_change(state_value) \
do { \
WARN_ON_ONCE(!is_special_task_state(state_value)); \
current->task_state_change = _THIS_IP_; \
} while (0)
 
-# debug_rtlock_wait_set_state() \
+# define debug_rtlock_wait_set_state() \
do { \
current->saved_state_change = current->task_state_change;\
current->task_state_change = _THIS_IP_; \
} while (0)
 
-# debug_rtlock_wait_restore_state() \
+# define debug_rtlock_wait_restore_state() \
do { \
current->task_state_change = current->saved_state_change;\
} while (0)
+
#else
-# debug_normal_state_change(cond) do { } while (0)
-# debug_special_state_change(cond) do { } while (0)
-# debug_rtlock_wait_set_state() do { } while (0)
-# debug_rtlock_wait_restore_state() do { } while (0)
+# define debug_normal_state_change(cond) do { } while (0)
+# define debug_special_state_change(cond) do { } while (0)
+# define debug_rtlock_wait_set_state() do { } while (0)
+# define debug_rtlock_wait_restore_state() do { } while (0)
#endif
 
/*
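For context, these helpers are consumed by the task-state-change macros later in the same header; a minimal sketch of such a caller, modeled on set_current_state() (illustrative, not the verbatim kernel macro):

/* Sketch: pairs the debug hook with a normal state change plus barrier. */
#define set_current_state_sketch(state_value)				\
	do {								\
		debug_normal_state_change((state_value));		\
		smp_store_mb(current->__state, (state_value));		\
	} while (0)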
@@ -11,46 +11,6 @@
#include <linux/spinlock_types_raw.h>
 
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RAW_SPIN_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_SPIN, \
- }
-# define SPIN_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_CONFIG, \
- }
-#else
-# define RAW_SPIN_DEP_MAP_INIT(lockname)
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- RAW_SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-
#ifndef CONFIG_PREEMPT_RT
 
/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
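The initializers removed above now live in <linux/spinlock_types_raw.h>, which the remaining #include pulls in, so existing raw spinlock users are unaffected. A hypothetical example (example_lock and example_critical_section are assumed names, not kernel code):

/* Still expands through __RAW_SPIN_LOCK_UNLOCKED(), now via spinlock_types_raw.h. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	raw_spin_lock(&example_lock);
	/* ... data protected by example_lock ... */
	raw_spin_unlock(&example_lock);
}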
@@ -1896,14 +1896,14 @@ static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
if (locked >= 0) {
/* Requeue succeeded. Set DONE or LOCKED */
WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
- old != Q_REQUEUE_PI_WAIT)
+ old != Q_REQUEUE_PI_WAIT);
new = Q_REQUEUE_PI_DONE + locked;
} else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
/* Deadlock, no early wakeup interleave */
new = Q_REQUEUE_PI_NONE;
} else {
/* Deadlock, early wakeup interleave. */
- WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_WAIT);
+ WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
new = Q_REQUEUE_PI_IGNORE;
}
} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
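Both WARN_ON_ONCE() fixes sit inside the usual lockless state-machine update: new is derived from old, and the cmpxchg retries with a refreshed old until it succeeds. A generic sketch of that pattern, with compute_next_state() as a hypothetical stand-in for the branch logic above:

	/* Retry loop: atomic_try_cmpxchg() updates 'old' on failure. */
	int old = atomic_read(&q->requeue_state);
	int new;

	do {
		new = compute_next_state(old, locked);	/* hypothetical helper */
	} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));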
@@ -1303,7 +1303,8 @@ static bool rtmutex_adaptive_spinwait(struct rt_mutex_base *lock,
* checking the above to be valid.
*/
barrier();
- if (!owner->on_cpu) {
+ if (!owner->on_cpu || need_resched() ||
+ vcpu_is_preempted(task_cpu(owner))) {
res = false;
break;
}
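The hunk widens the bail-out condition of the adaptive spin-wait: besides the owner leaving its CPU, the spinner now also stops when it should reschedule itself or when the owner's vCPU has been preempted by the host, since spinning cannot make progress in either case. A condensed sketch of the loop shape (owner_unchanged() is a hypothetical placeholder, not a kernel function):

/* Spin while the lock owner is unchanged; bail out if spinning is pointless. */
static bool adaptive_spinwait_sketch(struct rt_mutex_base *lock,
				     struct task_struct *owner)
{
	while (owner_unchanged(lock, owner)) {
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner)))
			return false;	/* give up and block instead */
		cpu_relax();
	}
	return true;
}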
@@ -11,7 +11,7 @@
* during that time are redirected to the saved state so no wake up is
* missed.
*
- * - Non RT spin/rw_locks disable preemption and evtl. interrupts.
+ * - Non RT spin/rw_locks disable preemption and eventually interrupts.
* Disabling preemption has the side effect of disabling migration and
* preventing RCU grace periods.
*
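The comment above describes the state-preservation scheme that the debug_rtlock_wait_*() helpers from the sched.h hunk instrument: the task state is saved before blocking on the substituted rtmutex and restored once the lock is acquired, and wakeups arriving in between are redirected to the saved state. A rough sketch of such a slowpath, using the sched.h save/restore helpers (simplified, not the verbatim kernel code):

/* Rough sketch of an RT spinlock slowpath; called with the lock's
 * wait_lock held and interrupts disabled. */
static void rtlock_slowpath_sketch(struct rt_mutex_base *lock)
{
	/* Save current->__state and switch to TASK_RTLOCK_WAIT. */
	current_save_and_set_rtlock_wait_state();

	/* ... enqueue as waiter, schedule_rtlock() until the lock is acquired ... */

	/* Restore the originally saved task state. */
	current_restore_rtlock_saved_state();
}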
@@ -1 +1 @@
--rt5
+-rt6