@@ -95,6 +95,7 @@ typedef struct {
odp_schedule_group_info_t *);
void (*schedule_order_lock)(unsigned);
void (*schedule_order_unlock)(unsigned);
+ void (*schedule_order_unlock_lock)(unsigned);
} schedule_api_t;
@@ -79,6 +79,7 @@ typedef struct reorder_window {
uint32_t tail;
uint32_t turn;
uint32_t olock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+ uint32_t lock_acquired;
uint16_t lock_count;
/* Reorder contexts in this window */
reorder_context_t *ring[RWIN_SIZE];
@@ -214,6 +214,8 @@ typedef struct {
/* Array of ordered locks */
odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+ odp_atomic_u64_t lock_acquired;
+
} order_context_t ODP_ALIGNED_CACHE;
typedef struct {
@@ -1103,6 +1105,7 @@ static void order_unlock(void)
static void schedule_order_lock(unsigned lock_index)
{
odp_atomic_u64_t *ord_lock;
+ odp_atomic_u64_t *lock_acquired;
uint32_t queue_index;
queue_index = sched_local.ordered.src_queue;
@@ -1112,6 +1115,7 @@ static void schedule_order_lock(unsigned lock_index)
!sched_local.ordered.lock_called.u8[lock_index]);
ord_lock = &sched->order[queue_index].lock[lock_index];
+ lock_acquired = &sched->order[queue_index].lock_acquired;
/* Busy loop to synchronize ordered processing */
while (1) {
@@ -1121,6 +1125,7 @@ static void schedule_order_lock(unsigned lock_index)
if (lock_seq == sched_local.ordered.ctx) {
sched_local.ordered.lock_called.u8[lock_index] = 1;
+ odp_atomic_store_rel_u64(lock_acquired, lock_index);
return;
}
odp_cpu_pause();
@@ -1130,6 +1135,7 @@ static void schedule_order_lock(unsigned lock_index)
static void schedule_order_unlock(unsigned lock_index)
{
odp_atomic_u64_t *ord_lock;
+ odp_atomic_u64_t *lock_acquired;
uint32_t queue_index;
queue_index = sched_local.ordered.src_queue;
@@ -1138,12 +1144,28 @@ static void schedule_order_unlock(unsigned lock_index)
lock_index <= sched->queue[queue_index].order_lock_count);
ord_lock = &sched->order[queue_index].lock[lock_index];
+ lock_acquired = &sched->order[queue_index].lock_acquired;
ODP_ASSERT(sched_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
+ odp_atomic_store_rel_u64(lock_acquired,
+ sched->queue[queue_index].
+ order_lock_count + 1);
odp_atomic_store_rel_u64(ord_lock, sched_local.ordered.ctx + 1);
}
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+ odp_atomic_u64_t *lock_acquired;
+ uint32_t queue_index;
+
+ queue_index = sched_local.ordered.src_queue;
+
+ lock_acquired = &sched->order[queue_index].lock_acquired;
+ schedule_order_unlock(odp_atomic_load_u64(lock_acquired));
+ schedule_order_lock(lock_index);
+}
+
static void schedule_pause(void)
{
sched_local.pause = 1;
@@ -1429,5 +1451,6 @@ const schedule_api_t schedule_default_api = {
.schedule_group_thrmask = schedule_group_thrmask,
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock
};
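With the default scheduler implementation above, the new callback is built from the existing ordered lock primitives plus the per-queue lock_acquired bookkeeping. As a sketch of the application-visible effect only (held_index is hypothetical; the scheduler derives it from lock_acquired rather than taking it from the caller):

#include <odp_api.h>

/* Sketch only: the application-visible effect of
 * odp_schedule_order_unlock_lock(lock_index) with this patch applied.
 * held_index is hypothetical; in the implementation it is taken from the
 * queue's lock_acquired field, never supplied by the application. */
static void unlock_lock_effect(unsigned held_index, unsigned lock_index)
{
	odp_schedule_order_unlock(held_index); /* release the lock held now */
	odp_schedule_order_lock(lock_index);   /* then acquire the requested one */
}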
@@ -129,3 +129,8 @@ void odp_schedule_order_unlock(unsigned lock_index)
{
return sched_api->schedule_order_unlock(lock_index);
}
+
+void odp_schedule_order_unlock_lock(uint32_t lock_index)
+{
+ sched_api->schedule_order_unlock_lock(lock_index);
+}
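A minimal usage sketch of the new public call, assuming the single-index signature introduced here. The source queue is assumed to be an ODP_SCHED_SYNC_ORDERED queue created with sched.lock_count >= 2; the lock indices 0 and 1 are illustrative only:

#include <odp_api.h>

/* Worker-loop fragment (sketch). The source queue is assumed to be an
 * ODP_SCHED_SYNC_ORDERED queue created with sched.lock_count >= 2. */
static void worker_iteration(void)
{
	odp_queue_t from;
	odp_event_t ev = odp_schedule(&from, ODP_SCHED_WAIT);

	odp_schedule_order_lock(0);
	/* ... in-order work protected by ordered lock 0 ... */

	/* Release lock 0 and acquire lock 1 in one call instead of a
	 * separate odp_schedule_order_unlock(0) + odp_schedule_order_lock(1)
	 * pair. */
	odp_schedule_order_unlock_lock(1);
	/* ... in-order work protected by ordered lock 1 ... */
	odp_schedule_order_unlock(1);

	odp_event_free(ev); /* or enqueue/transmit the event instead */
}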
@@ -135,6 +135,8 @@ typedef struct {
/* Array of ordered locks */
odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+ odp_atomic_u64_t lock_acquired;
+
} order_context_t ODP_ALIGNED_CACHE;
typedef struct {
@@ -1255,6 +1257,7 @@ static void order_unlock(void)
static void schedule_order_lock(unsigned lock_index)
{
odp_atomic_u64_t *ord_lock;
+ odp_atomic_u64_t *lock_acquired;
uint32_t queue_index;
queue_index = thread_local.ordered.src_queue;
@@ -1264,6 +1267,7 @@ static void schedule_order_lock(unsigned lock_index)
!thread_local.ordered.lock_called.u8[lock_index]);
ord_lock = &sched->order[queue_index].lock[lock_index];
+ lock_acquired = &sched->order[queue_index].lock_acquired;
/* Busy loop to synchronize ordered processing */
while (1) {
@@ -1273,6 +1277,7 @@ static void schedule_order_lock(unsigned lock_index)
if (lock_seq == thread_local.ordered.ctx) {
thread_local.ordered.lock_called.u8[lock_index] = 1;
+ odp_atomic_store_rel_u64(lock_acquired, lock_index);
return;
}
odp_cpu_pause();
@@ -1282,6 +1287,7 @@ static void schedule_order_lock(unsigned lock_index)
static void schedule_order_unlock(unsigned lock_index)
{
odp_atomic_u64_t *ord_lock;
+ odp_atomic_u64_t *lock_acquired;
uint32_t queue_index;
queue_index = thread_local.ordered.src_queue;
@@ -1290,12 +1296,27 @@ static void schedule_order_unlock(unsigned lock_index)
lock_index <= sched->queues[queue_index].lock_count);
ord_lock = &sched->order[queue_index].lock[lock_index];
+ lock_acquired = &sched->order[queue_index].lock_acquired;
ODP_ASSERT(thread_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
+ odp_atomic_store_rel_u64(lock_acquired,
+ sched->queues[queue_index].lock_count + 1);
odp_atomic_store_rel_u64(ord_lock, thread_local.ordered.ctx + 1);
}
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+ uint32_t queue_index;
+ odp_atomic_u64_t *lock_acquired;
+
+ queue_index = thread_local.ordered.src_queue;
+
+ lock_acquired = &sched->order[queue_index].lock_acquired;
+ schedule_order_unlock(odp_atomic_load_u64(lock_acquired));
+ schedule_order_lock(lock_index);
+}
+
static unsigned schedule_max_ordered_locks(void)
{
return CONFIG_QUEUE_MAX_ORD_LOCKS;
@@ -1368,7 +1389,8 @@ const schedule_api_t schedule_iquery_api = {
.schedule_group_thrmask = schedule_group_thrmask,
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock
};
static void thread_set_interest(sched_thread_local_t *thread,
@@ -1007,6 +1007,8 @@ static void schedule_order_lock(unsigned lock_index)
monitor32(&rctx->rwin->olock[lock_index],
__ATOMIC_ACQUIRE) != rctx->sn)
doze();
+ atomic_store_release(&rctx->rwin->lock_acquired, lock_index, false);
}
}
@@ -1025,9 +1027,25 @@ static void schedule_order_unlock(unsigned lock_index)
atomic_store_release(&rctx->rwin->olock[lock_index],
rctx->sn + 1,
/*readonly=*/false);
+ atomic_store_release(&rctx->rwin->lock_acquired,
+ rctx->rwin->lock_count + 1, false);
rctx->olock_flags |= 1U << lock_index;
}
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+ struct reorder_context *rctx;
+
+ rctx = sched_ts->rctx;
+ if (odp_unlikely(rctx == NULL || rctx->rwin == NULL)) {
+ ODP_ERR("Invalid call to odp_schedule_order_unlock_lock\n");
+ return;
+ }
+ schedule_order_unlock(__atomic_load_n(&rctx->rwin->lock_acquired,
+ __ATOMIC_ACQUIRE));
+ schedule_order_lock(lock_index);
+}
+
static void schedule_release_atomic(void)
{
sched_scalable_thread_state_t *ts;
@@ -1978,4 +1996,5 @@ const schedule_api_t schedule_scalable_api = {
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
.schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock,
};
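In the scalable scheduler the held index lives in the reorder window: schedule_order_lock() and schedule_order_unlock() publish it with atomic_store_release(), and schedule_order_unlock_lock() reads it back with an acquire load before releasing it. A minimal, self-contained C11 illustration of that store-release / load-acquire pairing (the stdatomic names below are illustrative, not part of the patch):

#include <stdatomic.h>
#include <stdint.h>

/* Stand-in for rwin->lock_acquired: the lock/unlock paths publish which
 * ordered lock index is currently held with a release store, and the
 * combined unlock+lock path reads it with an acquire load so that it
 * observes the value last published for this ordered context. */
static _Atomic uint32_t held_lock_index;

static void publish_held_lock(uint32_t lock_index)
{
	atomic_store_explicit(&held_lock_index, lock_index,
			      memory_order_release);
}

static uint32_t query_held_lock(void)
{
	return atomic_load_explicit(&held_lock_index, memory_order_acquire);
}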
@@ -819,6 +819,11 @@ static void schedule_order_unlock(unsigned lock_index)
(void)lock_index;
}
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+ (void)lock_index;
+}
+
static void order_lock(void)
{
}
@@ -868,5 +873,6 @@ const schedule_api_t schedule_sp_api = {
.schedule_group_thrmask = schedule_group_thrmask,
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock
};