--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -241,7 +241,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 	/* Force readers into slow path */
 	atomic_sub(READER_BIAS, &rwb->readers);
 
-	rt_mutex_pre_schedule();
+	rwbase_pre_schedule();
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 	if (__rwbase_write_trylock(rwb))
@@ -254,7 +254,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 		if (rwbase_signal_pending_state(state, current)) {
 			rwbase_restore_current_state();
 			__rwbase_write_unlock(rwb, 0, flags);
-			rt_mutex_post_schedule();
+			rwbase_post_schedule();
 			trace_contention_end(rwb, -EINTR);
 			return -EINTR;
 		}
@@ -273,7 +273,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
-	rt_mutex_post_schedule();
+	rwbase_post_schedule();
 	return 0;
 }
 
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt7
+-rt8
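
Background on the rename: rwbase_rt.c is template code shared by both
rw_semaphore and rwlock_t on PREEMPT_RT. rt_mutex_pre_schedule() and
rt_mutex_post_schedule() wrap the sched_submit_work() handling around
blocking, which is only appropriate for the sleeping rwsem writer; an
rtlock-based rwlock_t writer blocks via schedule_rtlock() and must skip
that step. Switching to rwbase_pre_schedule()/rwbase_post_schedule()
lets each user of the template plug in the matching behaviour. A
minimal sketch of how the two users presumably wire up the hooks (the
definitions below are an illustrative assumption, not part of this
patch):

	/*
	 * Sketch: kernel/locking/rwsem.c (sleeping rwsem on
	 * PREEMPT_RT) forwards to the rt_mutex helpers, which handle
	 * the submit-work step before blocking.
	 */
	#define rwbase_pre_schedule()		rt_mutex_pre_schedule()
	#define rwbase_schedule()		rt_mutex_schedule()
	#define rwbase_post_schedule()		rt_mutex_post_schedule()

	/*
	 * Sketch: kernel/locking/spinlock_rt.c (rtlock-based rwlock_t)
	 * blocks via schedule_rtlock() with no submit-work step, so
	 * the pre/post hooks are empty.
	 */
	#define rwbase_pre_schedule()
	#define rwbase_schedule()		schedule_rtlock()
	#define rwbase_post_schedule()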