@@ -127,7 +127,7 @@ static bool dma_fence_array_signaled(struct dma_fence *fence)
}
for (i = 0; i < array->num_fences; ++i) {
- if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
+ if (dma_fence_check_and_signal(array->fences[i]) && !--num_pending)
goto signal;
}
return false;
@@ -51,12 +51,12 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
prev_chain = to_dma_fence_chain(prev);
if (prev_chain) {
- if (!dma_fence_is_signaled(prev_chain->fence))
+ if (!dma_fence_check_and_signal(prev_chain->fence))
break;
replacement = dma_fence_chain_get_prev(prev_chain);
} else {
- if (!dma_fence_is_signaled(prev))
+ if (!dma_fence_check_and_signal(prev))
break;
replacement = NULL;
@@ -166,7 +166,7 @@ static bool dma_fence_chain_signaled(struct dma_fence *fence)
dma_fence_chain_for_each(fence, fence) {
struct dma_fence *f = dma_fence_chain_contained(fence);
- if (!dma_fence_is_signaled(f)) {
+ if (!dma_fence_check_and_signal(f)) {
dma_fence_put(fence);
return false;
}
@@ -93,7 +93,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
timestamp = ns_to_ktime(0);
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
- if (!dma_fence_is_signaled(tmp)) {
+ if (!dma_fence_check_and_signal(tmp)) {
dma_fence_put(unsignaled);
unsignaled = dma_fence_get(tmp);
++count;
@@ -127,7 +127,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
count = 0;
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
- if (!dma_fence_is_signaled(tmp)) {
+ if (!dma_fence_check_and_signal(tmp)) {
array[count++] = dma_fence_get(tmp);
} else {
ktime_t t = dma_fence_timestamp(tmp);
@@ -857,7 +857,7 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
if (timeout == 0) {
for (i = 0; i < count; ++i)
- if (dma_fence_is_signaled(fences[i])) {
+ if (dma_fence_check_and_signal(fences[i])) {
if (idx)
*idx = i;
return 1;
@@ -968,7 +968,7 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
*/
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
- if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
+ if (fence->ops->set_deadline && !dma_fence_check_and_signal(fence))
fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);
@@ -985,7 +985,7 @@ void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
seq_printf(seq, "%s %s seq %llu %ssignalled\n",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence), fence->seqno,
- dma_fence_is_signaled(fence) ? "" : "un");
+ dma_fence_check_and_signal(fence) ? "" : "un");
}
EXPORT_SYMBOL(dma_fence_describe);
@@ -217,7 +217,7 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
struct dma_fence *fence;
dma_resv_list_entry(old, i, obj, &fence, &usage);
- if (dma_fence_is_signaled(fence))
+ if (dma_fence_check_and_signal(fence))
RCU_INIT_POINTER(new->table[--k], fence);
else
dma_resv_list_set(new, j++, fence, usage);
@@ -309,7 +309,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage &&
dma_fence_is_later_or_same(fence, old)) ||
- dma_fence_is_signaled(old)) {
+ dma_fence_check_and_signal(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
return;
@@ -398,7 +398,7 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
continue;
}
- if (!dma_fence_is_signaled(cursor->fence) &&
+ if (!dma_fence_check_and_signal(cursor->fence) &&
cursor->usage >= cursor->fence_usage)
break;
} while (true);
@@ -484,7 +484,7 @@ static int find_race(void *arg)
count = 0;
for (i = 0; i < data.fc.chain_length; i++)
- if (dma_fence_is_signaled(data.fc.fences[i]))
+ if (dma_fence_check_and_signal(data.fc.fences[i]))
count++;
pr_info("Completed %lu cycles\n", count);
@@ -506,14 +506,14 @@ static int signal_forward(void *arg)
for (i = 0; i < fc.chain_length; i++) {
dma_fence_signal(fc.fences[i]);
- if (!dma_fence_is_signaled(fc.chains[i])) {
+ if (!dma_fence_check_and_signal(fc.chains[i])) {
pr_err("chain[%d] not signaled!\n", i);
err = -EINVAL;
goto err;
}
if (i + 1 < fc.chain_length &&
- dma_fence_is_signaled(fc.chains[i + 1])) {
+ dma_fence_check_and_signal(fc.chains[i + 1])) {
pr_err("chain[%d] is signaled!\n", i);
err = -EINVAL;
goto err;
@@ -538,7 +538,7 @@ static int signal_backward(void *arg)
for (i = fc.chain_length; i--; ) {
dma_fence_signal(fc.fences[i]);
- if (i > 0 && dma_fence_is_signaled(fc.chains[i])) {
+ if (i > 0 && dma_fence_check_and_signal(fc.chains[i])) {
pr_err("chain[%d] is signaled!\n", i);
err = -EINVAL;
goto err;
@@ -546,7 +546,7 @@ static int signal_backward(void *arg)
}
for (i = 0; i < fc.chain_length; i++) {
- if (!dma_fence_is_signaled(fc.chains[i])) {
+ if (!dma_fence_check_and_signal(fc.chains[i])) {
pr_err("chain[%d] was not signaled!\n", i);
err = -EINVAL;
goto err;
@@ -121,7 +121,7 @@ static int test_signaling(void *arg)
dma_fence_enable_sw_signaling(f);
- if (dma_fence_is_signaled(f)) {
+ if (dma_fence_check_and_signal(f)) {
pr_err("Fence unexpectedly signaled on creation\n");
goto err_free;
}
@@ -131,7 +131,7 @@ static int test_signaling(void *arg)
goto err_free;
}
- if (!dma_fence_is_signaled(f)) {
+ if (!dma_fence_check_and_signal(f)) {
pr_err("Fence not reporting signaled\n");
goto err_free;
}
@@ -427,7 +427,7 @@ static int test_stub(void *arg)
for (i = 0; i < ARRAY_SIZE(f); i++) {
f[i] = dma_fence_get_stub();
- if (!dma_fence_is_signaled(f[i])) {
+ if (!dma_fence_check_and_signal(f[i])) {
pr_err("Obtained unsignaled stub fence!\n");
goto err;
}
@@ -505,7 +505,7 @@ static int thread_signal_callback(void *arg)
pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
t->id, pass, miss,
t->before ? "before" : "after",
- dma_fence_is_signaled(f2) ? "yes" : "no");
+ dma_fence_check_and_signal(f2) ? "yes" : "no");
err = -EINVAL;
}
@@ -261,7 +261,7 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
INIT_LIST_HEAD(&pt->link);
spin_lock_irq(&obj->lock);
- if (!dma_fence_is_signaled_locked(&pt->base)) {
+ if (!dma_fence_check_and_signal_locked(&pt->base)) {
struct rb_node **p = &obj->pt_tree.rb_node;
struct rb_node *parent = NULL;
@@ -201,7 +201,7 @@ static __poll_t sync_file_poll(struct file *file, poll_table *wait)
wake_up_all(&sync_file->wq);
}
- return dma_fence_is_signaled(sync_file->fence) ? EPOLLIN : 0;
+ return dma_fence_check_and_signal(sync_file->fence) ? EPOLLIN : 0;
}
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -269,7 +269,7 @@ static int sync_fill_fence_info(struct dma_fence *fence,
info->status = dma_fence_get_status(fence);
info->timestamp_ns =
- dma_fence_is_signaled(fence) ?
+ dma_fence_check_and_signal(fence) ?
ktime_to_ns(dma_fence_timestamp(fence)) :
ktime_set(0, 0);
@@ -123,7 +123,7 @@ static bool amdkfd_fence_enable_signaling(struct dma_fence *f)
if (!fence)
return false;
- if (dma_fence_is_signaled(f))
+ if (dma_fence_check_and_signal(f))
return true;
if (!fence->svm_bo) {
@@ -1864,7 +1864,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
} else {
mutex_lock(&avm->process_info->lock);
if (avm->process_info->eviction_fence &&
- !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+ !dma_fence_check_and_signal(&avm->process_info->eviction_fence->base))
ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
&avm->process_info->eviction_fence->base);
mutex_unlock(&avm->process_info->lock);
@@ -2425,7 +2425,7 @@ static int import_obj_create(struct amdgpu_device *adev,
mutex_lock(&avm->process_info->lock);
if (avm->process_info->eviction_fence &&
- !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+ !dma_fence_check_and_signal(&avm->process_info->eviction_fence->base))
ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
&avm->process_info->eviction_fence->base);
mutex_unlock(&avm->process_info->lock);
@@ -2875,7 +2875,7 @@ static void replace_eviction_fence(struct dma_fence __rcu **ef,
* replace the fence in restore_work that only gets scheduled after
* eviction work signaled the fence.
*/
- WARN_ONCE(!dma_fence_is_signaled(old_ef),
+ WARN_ONCE(!dma_fence_check_and_signal(old_ef),
"Replacing unsignaled eviction fence");
dma_fence_put(old_ef);
}
@@ -3049,7 +3049,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
* Anyone signaling an eviction fence must stop the queues first
* and schedule another restore worker.
*/
- if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
+ if (dma_fence_check_and_signal(&process_info->eviction_fence->base)) {
struct amdgpu_amdkfd_fence *new_fence =
amdgpu_amdkfd_fence_create(
process_info->eviction_fence->base.context,
@@ -765,7 +765,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
idx = seq & (amdgpu_sched_jobs - 1);
other = centity->fences[idx];
- WARN_ON(other && !dma_fence_is_signaled(other));
+ WARN_ON(other && !dma_fence_check_and_signal(other));
dma_fence_get(fence);
@@ -1895,7 +1895,7 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
no_preempt:
spin_lock(&sched->job_list_lock);
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
- if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
+ if (dma_fence_check_and_signal(&s_job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&s_job->list);
sched->ops->free_job(s_job);
@@ -6000,7 +6000,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*
* job->base holds a reference to parent fence
*/
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
+ if (job && dma_fence_check_and_signal(&job->hw_fence)) {
job_signaled = true;
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -6860,7 +6860,7 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
if (old == gang)
break;
- if (!dma_fence_is_signaled(old))
+ if (!dma_fence_check_and_signal(old))
return old;
} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
@@ -719,7 +719,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
* and they will remain unsignaled during sa_bo free.
*/
job = container_of(old, struct amdgpu_job, hw_fence);
- if (!job->base.s_fence && !dma_fence_is_signaled(old))
+ if (!job->base.s_fence && !dma_fence_check_and_signal(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
dma_fence_put(old);
@@ -745,7 +745,7 @@ void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
fence = rcu_dereference_protected(drv->fences[i],
lockdep_is_held(&drv->lock));
- if (fence && !dma_fence_is_signaled_locked(fence))
+ if (fence && !dma_fence_check_and_signal_locked(fence))
dma_fence_set_error(fence, error);
}
spin_unlock_irqrestore(&drv->lock, flags);
@@ -204,7 +204,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
struct dma_fence **fences;
unsigned i;
- if (!dma_fence_is_signaled(ring->vmid_wait)) {
+ if (!dma_fence_check_and_signal(ring->vmid_wait)) {
*fence = dma_fence_get(ring->vmid_wait);
return 0;
}
@@ -287,14 +287,14 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
(*id)->flushed_updates < updates ||
!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
- !dma_fence_is_signaled((*id)->last_flush))) {
+ !dma_fence_check_and_signal((*id)->last_flush))) {
struct dma_fence *tmp;
/* Wait for the gang to be assembled before using a
* reserved VMID or otherwise the gang could deadlock.
*/
tmp = amdgpu_device_get_gang(adev);
- if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
+ if (!dma_fence_check_and_signal(tmp) && tmp != job->gang_submit) {
*id = NULL;
*fence = tmp;
return 0;
@@ -372,7 +372,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
if (!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
- !dma_fence_is_signaled((*id)->last_flush)))
+ !dma_fence_check_and_signal((*id)->last_flush)))
needs_flush = true;
if ((*id)->flushed_updates < updates)
@@ -464,16 +464,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
return false;
spin_lock_irqsave(fence->lock, flags);
- if (!dma_fence_is_signaled_locked(fence))
+ if (!dma_fence_check_and_signal_locked(fence))
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
atomic_inc(&ring->adev->gpu_reset_counter);
- while (!dma_fence_is_signaled(fence) &&
+ while (!dma_fence_check_and_signal(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
ring->funcs->soft_recovery(ring, vmid);
- return dma_fence_is_signaled(fence);
+ return dma_fence_check_and_signal(fence);
}
/*
@@ -318,7 +318,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct dma_fence *f = e->fence;
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
- if (dma_fence_is_signaled(f)) {
+ if (dma_fence_check_and_signal(f)) {
amdgpu_sync_entry_free(e);
continue;
}
@@ -327,7 +327,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* when they are scheduled.
*/
if (s_fence->sched == &ring->sched) {
- if (dma_fence_is_signaled(&s_fence->scheduled))
+ if (dma_fence_check_and_signal(&s_fence->scheduled))
continue;
return &s_fence->scheduled;
@@ -361,7 +361,7 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
- if (!dma_fence_is_signaled(f))
+ if (!dma_fence_check_and_signal(f))
return f;
dma_fence_put(f);
@@ -387,7 +387,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
hash_for_each_safe(source->fences, i, tmp, e, node) {
f = e->fence;
- if (!dma_fence_is_signaled(f)) {
+ if (!dma_fence_check_and_signal(f)) {
r = amdgpu_sync_fence(clone, f);
if (r)
return r;
@@ -415,7 +415,7 @@ int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence;
- if (dma_fence_is_signaled(f)) {
+ if (dma_fence_check_and_signal(f)) {
amdgpu_sync_entry_free(e);
continue;
}
@@ -775,7 +775,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
mutex_lock(&id_mgr->lock);
if (id->pasid != job->pasid || !id->pasid_mapping ||
- !dma_fence_is_signaled(id->pasid_mapping))
+ !dma_fence_check_and_signal(id->pasid_mapping))
pasid_mapping_needed = true;
mutex_unlock(&id_mgr->lock);
@@ -1110,7 +1110,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error_free;
}
- if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
+ if (!unlocked && !dma_fence_check_and_signal(vm->last_unlocked)) {
struct dma_fence *tmp = dma_fence_get_stub();
amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
@@ -2200,7 +2200,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Don't evict VM page tables while they are updated */
- if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
+ if (!dma_fence_check_and_signal(bo_base->vm->last_unlocked)) {
amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
@@ -1165,7 +1165,7 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
if (!fence)
return -EINVAL;
- if (dma_fence_is_signaled(fence))
+ if (dma_fence_check_and_signal(fence))
return 0;
p = kfd_lookup_process_by_mm(mm);
@@ -426,7 +426,7 @@ static void svm_range_bo_release(struct kref *kref)
mmput(mm);
}
- if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
+ if (!dma_fence_check_and_signal(&svm_bo->eviction_fence->base))
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
dma_fence_put(&svm_bo->eviction_fence->base);
@@ -133,7 +133,7 @@ static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager)
sa = list_entry(sa_manager->hole->next, struct drm_suballoc, olist);
list_for_each_entry_safe_from(sa, tmp, &sa_manager->olist, olist) {
- if (!sa->fence || !dma_fence_is_signaled(sa->fence))
+ if (!sa->fence || !dma_fence_check_and_signal(sa->fence))
return;
drm_suballoc_remove_locked(sa);
@@ -253,7 +253,7 @@ static bool drm_suballoc_next_hole(struct drm_suballoc_manager *sa_manager,
sa = list_first_entry(&sa_manager->flist[i],
struct drm_suballoc, flist);
- if (!dma_fence_is_signaled(sa->fence)) {
+ if (!dma_fence_check_and_signal(sa->fence)) {
fences[i] = sa->fence;
continue;
}
@@ -406,7 +406,7 @@ void drm_suballoc_free(struct drm_suballoc *suballoc,
sa_manager = suballoc->manager;
spin_lock(&sa_manager->wq.lock);
- if (fence && !dma_fence_is_signaled(fence)) {
+ if (fence && !dma_fence_check_and_signal(fence)) {
u32 idx;
suballoc->fence = dma_fence_get(fence);
@@ -1120,7 +1120,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
entries[i].fence = dma_fence_get_stub();
if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
- dma_fence_is_signaled(entries[i].fence)) {
+ dma_fence_check_and_signal(entries[i].fence)) {
if (signaled_count == 0 && idx)
*idx = i;
signaled_count++;
@@ -1164,7 +1164,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
continue;
if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
- dma_fence_is_signaled(fence) ||
+ dma_fence_check_and_signal(fence) ||
(!entries[i].fence_cb.func &&
dma_fence_add_callback(fence,
&entries[i].fence_cb,
@@ -1723,7 +1723,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
dma_fence_put(last_signaled);
last_signaled = dma_fence_get(iter);
}
- point = dma_fence_is_signaled(last_signaled) ?
+ point = dma_fence_check_and_signal(last_signaled) ?
last_signaled->seqno :
to_dma_fence_chain(last_signaled)->prev_seqno;
}
@@ -1284,7 +1284,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
if (!timeout) {
/* No timeout was requested: just test for completion */
- ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
+ ret = dma_fence_check_and_signal(fence) ? 0 : -EBUSY;
} else {
unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
@@ -43,7 +43,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
* If the GPU managed to complete this jobs fence, the timout is
* spurious. Bail out.
*/
- if (dma_fence_is_signaled(submit->out_fence))
+ if (dma_fence_check_and_signal(submit->out_fence))
goto out_no_timeout;
/*
@@ -95,7 +95,7 @@ static void fence_set_priority(struct dma_fence *fence,
struct i915_request *rq;
struct intel_engine_cs *engine;
- if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
+ if (dma_fence_check_and_signal(fence) || !dma_fence_is_i915(fence))
return;
rq = to_request(fence);
@@ -115,7 +115,7 @@ static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
void i915_gem_fence_wait_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr)
{
- if (dma_fence_is_signaled(fence))
+ if (dma_fence_check_and_signal(fence))
return;
local_bh_disable();
@@ -258,7 +258,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
goto out_put;
if (spin) {
- if (dma_fence_is_signaled(spin_fence)) {
+ if (dma_fence_check_and_signal(spin_fence)) {
pr_err("Spinner was terminated by hangcheck.\n");
err = -EBUSY;
goto out_unlock;
@@ -97,7 +97,7 @@ static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence,
return 0;
sync:
- if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) {
+ if (ctx->no_wait_gpu && !dma_fence_check_and_signal(fence)) {
ret = -EBUSY;
goto unref;
}
@@ -136,7 +136,7 @@ int i915_deps_sync(const struct i915_deps *deps, const struct ttm_operation_ctx
int ret = 0;
for (i = 0; i < deps->num_deps; ++i, ++fences) {
- if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) {
+ if (ctx->no_wait_gpu && !dma_fence_check_and_signal(*fences)) {
ret = -EBUSY;
break;
}
@@ -183,7 +183,7 @@ int i915_deps_add_dependency(struct i915_deps *deps,
if (!fence)
return 0;
- if (dma_fence_is_signaled(fence)) {
+ if (dma_fence_check_and_signal(fence)) {
ret = fence->error;
if (ret)
i915_deps_fini(deps);
@@ -1932,7 +1932,7 @@ static bool __i915_spin_request(struct i915_request * const rq, int state)
timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
timeout_ns += local_clock_ns(&cpu);
do {
- if (dma_fence_is_signaled(&rq->fence))
+ if (dma_fence_check_and_signal(&rq->fence))
return true;
if (signal_pending_state(state, current))
@@ -1989,7 +1989,7 @@ long i915_request_wait_timeout(struct i915_request *rq,
might_sleep();
GEM_BUG_ON(timeout < 0);
- if (dma_fence_is_signaled(&rq->fence))
+ if (dma_fence_check_and_signal(&rq->fence))
return timeout ?: 1;
if (!timeout)
@@ -2072,7 +2072,7 @@ long i915_request_wait_timeout(struct i915_request *rq,
for (;;) {
set_current_state(state);
- if (dma_fence_is_signaled(&rq->fence))
+ if (dma_fence_check_and_signal(&rq->fence))
break;
if (signal_pending_state(state, current)) {
@@ -483,7 +483,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- if (dma_fence_is_signaled(dma)) {
+ if (dma_fence_check_and_signal(dma)) {
i915_sw_fence_set_error_once(fence, dma->error);
return 0;
}
@@ -551,7 +551,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
- if (dma_fence_is_signaled(dma)) {
+ if (dma_fence_check_and_signal(dma)) {
i915_sw_fence_set_error_once(fence, dma->error);
return 0;
}
@@ -430,7 +430,7 @@ static int i915_vma_verify_bind_complete(struct i915_vma *vma)
if (!fence)
return 0;
- if (dma_fence_is_signaled(fence))
+ if (dma_fence_check_and_signal(fence))
err = fence->error;
else
err = -EBUSY;
@@ -170,7 +170,7 @@ static int igt_fence_wait(void *arg)
i915_request_add(request);
- if (dma_fence_is_signaled(&request->fence)) {
+ if (dma_fence_check_and_signal(&request->fence)) {
pr_err("fence signaled immediately!\n");
goto out;
}
@@ -185,7 +185,7 @@ static int igt_fence_wait(void *arg)
goto out;
}
- if (!dma_fence_is_signaled(&request->fence)) {
+ if (!dma_fence_check_and_signal(&request->fence)) {
pr_err("fence unsignaled after waiting!\n");
goto out;
}
@@ -365,7 +365,7 @@ static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
if (!jfence)
continue;
- if (!dma_fence_is_signaled(&jfence->base))
+ if (!dma_fence_check_and_signal(&jfence->base))
remaining_count++;
}
@@ -470,7 +470,7 @@ pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job)
xa_for_each(&frag_job->base.dependencies, index, f) {
/* Skip already signaled fences. */
- if (dma_fence_is_signaled(f))
+ if (dma_fence_check_and_signal(f))
continue;
/* Skip our own fence. */
@@ -625,7 +625,7 @@ static void pvr_queue_submit_job_to_cccb(struct pvr_job *job)
&job->paired_job->base.s_fence->scheduled == fence)
continue;
- if (dma_fence_is_signaled(&jfence->base))
+ if (dma_fence_check_and_signal(&jfence->base))
continue;
pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj,
@@ -768,7 +768,7 @@ static void pvr_queue_start(struct pvr_queue *queue)
*queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno);
list_for_each_entry(job, &queue->scheduler.pending_list, base.list) {
- if (dma_fence_is_signaled(job->done_fence)) {
+ if (dma_fence_check_and_signal(job->done_fence)) {
/* Jobs might have completed after drm_sched_stop() was called.
* In that case, re-assign the parent field to the done_fence.
*/
@@ -910,7 +910,7 @@ pvr_queue_signal_done_fences(struct pvr_queue *queue)
if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0)
break;
- if (!dma_fence_is_signaled(job->done_fence)) {
+ if (!dma_fence_check_and_signal(job->done_fence)) {
dma_fence_signal(job->done_fence);
pvr_job_release_pm_ref(job);
atomic_dec(&queue->in_flight_job_count);
@@ -408,7 +408,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
* If the GPU managed to complete this jobs fence, the timeout is
* spurious. Bail out.
*/
- if (dma_fence_is_signaled(task->fence)) {
+ if (dma_fence_check_and_signal(task->fence)) {
DRM_WARN("%s spurious timeout\n", lima_ip_name(ip));
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -425,7 +425,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
if (pipe->bcast_processor)
synchronize_irq(pipe->bcast_processor->irq);
- if (dma_fence_is_signaled(task->fence)) {
+ if (dma_fence_check_and_signal(task->fence)) {
DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -717,7 +717,7 @@ static void retire_submits(struct msm_gpu *gpu)
* been signalled, then later submits are not signalled
* either, so we are also done.
*/
- if (submit && dma_fence_is_signaled(submit->hw_fence)) {
+ if (submit && dma_fence_check_and_signal(submit->hw_fence)) {
retire_submit(gpu, ring, submit);
} else {
break;
@@ -159,7 +159,7 @@ nouveau_cli_work_ready(struct dma_fence *fence)
bool ret = true;
spin_lock_irq(fence->lock);
- if (!dma_fence_is_signaled_locked(fence))
+ if (!dma_fence_check_and_signal_locked(fence))
ret = false;
spin_unlock_irq(fence->lock);
@@ -274,7 +274,7 @@ nouveau_fence_done(struct nouveau_fence *fence)
nvif_event_block(&fctx->event);
spin_unlock_irqrestore(&fctx->lock, flags);
}
- return dma_fence_is_signaled(&fence->base);
+ return dma_fence_check_and_signal(&fence->base);
}
static long
@@ -754,7 +754,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
* If the GPU managed to complete this jobs fence, the timeout is
* spurious. Bail out.
*/
- if (dma_fence_is_signaled(job->done_fence))
+ if (dma_fence_check_and_signal(job->done_fence))
return DRM_GPU_SCHED_STAT_NOMINAL;
/*
@@ -768,7 +768,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
*/
synchronize_irq(pfdev->js->irq);
- if (dma_fence_is_signaled(job->done_fence)) {
+ if (dma_fence_check_and_signal(job->done_fence)) {
dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -63,7 +63,7 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
qdev = container_of(fence->lock, struct qxl_device, release_lock);
if (!wait_event_timeout(qdev->release_event,
- (dma_fence_is_signaled(fence) ||
+ (dma_fence_check_and_signal(fence) ||
(qxl_io_notify_oom(qdev), 0)),
timeout))
return 0;
@@ -548,7 +548,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
fence = rcu_dereference_check(entity->last_scheduled, true);
/* stay on the same engine if the previous job hasn't finished */
- if (fence && !dma_fence_is_signaled(fence))
+ if (fence && !dma_fence_check_and_signal(fence))
return;
spin_lock(&entity->lock);
@@ -382,7 +382,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
- if (job && dma_fence_is_signaled(&job->s_fence->finished))
+ if (job && dma_fence_check_and_signal(&job->s_fence->finished))
__drm_sched_run_free_queue(sched);
spin_unlock(&sched->job_list_lock);
}
@@ -1118,7 +1118,7 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
- if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
+ if (job && dma_fence_check_and_signal(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
@@ -672,7 +672,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return 0;
if (no_wait_gpu) {
- ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
+ ret = dma_fence_check_and_signal(fence) ? 0 : -EBUSY;
dma_fence_put(fence);
return ret;
}
@@ -519,7 +519,7 @@ v3d_copy_query_results(struct v3d_cpu_job *job)
for (i = 0; i < timestamp_query->count; i++) {
fence = drm_syncobj_fence_get(queries[i].syncobj);
- available = fence ? dma_fence_is_signaled(fence) : false;
+ available = fence ? dma_fence_check_and_signal(fence) : false;
write_result = available || copy->do_partial;
if (write_result) {
@@ -623,7 +623,7 @@ v3d_copy_performance_query(struct v3d_cpu_job *job)
for (int i = 0; i < performance_query->count; i++) {
fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
- available = fence ? dma_fence_is_signaled(fence) : false;
+ available = fence ? dma_fence_check_and_signal(fence) : false;
write_result = available || copy->do_partial;
if (write_result)
@@ -207,7 +207,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
if (IS_ERR(fence))
return PTR_ERR(fence);
- if (dma_fence_is_signaled(fence))
+ if (dma_fence_check_and_signal(fence))
ret = -ETIMEDOUT;
dma_fence_signal(fence);
@@ -443,7 +443,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
- if (dma_fence_is_signaled_locked(&fence->base))
+ if (dma_fence_check_and_signal_locked(&fence->base))
return false;
goal_seqno = vmw_fence_goal_read(fman->dev_priv);
@@ -513,7 +513,7 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
vmw_fences_update(fman);
- return dma_fence_is_signaled(&fence->base);
+ return dma_fence_check_and_signal(&fence->base);
}
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
@@ -886,7 +886,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
spin_lock(&fman->lock);
fman->pending_actions[action->type]++;
- if (dma_fence_is_signaled_locked(&fence->base)) {
+ if (dma_fence_check_and_signal_locked(&fence->base)) {
struct list_head action_list;
INIT_LIST_HEAD(&action_list);
@@ -1235,7 +1235,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP, fence) {
if (xe_fence_is_xe_preempt(fence) &&
- !dma_fence_is_signaled(fence)) {
+ !dma_fence_check_and_signal(fence)) {
if (!replacement)
replacement = dma_fence_get_stub();
@@ -2202,9 +2202,9 @@ xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
snapshot->pending_list[i].seqno =
xe_sched_job_seqno(job_iter);
snapshot->pending_list[i].fence =
- dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
+ dma_fence_check_and_signal(job_iter->fence) ? 1 : 0;
snapshot->pending_list[i].finished =
- dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
+ dma_fence_check_and_signal(&job_iter->drm.s_fence->finished)
? 1 : 0;
i++;
}
@@ -62,7 +62,7 @@ static void hw_fence_irq_run_cb(struct irq_work *work)
struct dma_fence *dma_fence = &fence->dma;
trace_xe_hw_fence_try_signal(fence);
- if (dma_fence_is_signaled_locked(dma_fence)) {
+ if (dma_fence_check_and_signal_locked(dma_fence)) {
trace_xe_hw_fence_signal(fence);
list_del_init(&fence->irq_link);
dma_fence_put(dma_fence);
@@ -1127,7 +1127,7 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
while (rtfence) {
fence = rtfence->fence;
- if (!dma_fence_is_signaled(fence)) {
+ if (!dma_fence_check_and_signal(fence)) {
/*
* Is this a CPU update? GPU is busy updating, so return
* an error
@@ -60,7 +60,7 @@ int xe_range_fence_insert(struct xe_range_fence_tree *tree,
__xe_range_fence_tree_cleanup(tree);
- if (dma_fence_is_signaled(fence))
+ if (dma_fence_check_and_signal(fence))
goto free;
rfence->ops = ops;
@@ -201,7 +201,7 @@ struct dma_fence_ops {
* once indicates as signalled must always return true from this
* callback. This callback may return false even if the fence has
* completed already, in this case information hasn't propogated throug
- * the system yet. See also dma_fence_is_signaled().
+ * the system yet. See also dma_fence_check_and_signal().
*
* May set &dma_fence.error if returning true.
*
@@ -382,8 +382,7 @@ bool dma_fence_remove_callback(struct dma_fence *fence,
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
- * dma_fence_is_signaled_locked - Return an indication if the fence
- * is signaled yet.
+ * dma_fence_check_and_signal_locked - Checks a fence and signals it if necessary
* @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
@@ -393,10 +392,10 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);
*
* This function requires &dma_fence.lock to be held.
*
- * See also dma_fence_is_signaled().
+ * See also dma_fence_check_and_signal().
*/
static inline bool
-dma_fence_is_signaled_locked(struct dma_fence *fence)
+dma_fence_check_and_signal_locked(struct dma_fence *fence)
{
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return true;
@@ -410,7 +409,7 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
}
/**
- * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
+ * dma_fence_check_and_signal - Checks a fence and signals it if necessary
* @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
@@ -423,10 +422,10 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
* wraparound between time of issue and time of use by checking the return
* value of this function before calling hardware-specific wait instructions.
*
- * See also dma_fence_is_signaled_locked().
+ * See also dma_fence_check_and_signal_locked().
*/
static inline bool
-dma_fence_is_signaled(struct dma_fence *fence)
+dma_fence_check_and_signal(struct dma_fence *fence)
{
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return true;
@@ -514,9 +513,9 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
* here is overkill.
*/
if (dma_fence_is_later(f1, f2))
- return dma_fence_is_signaled(f1) ? NULL : f1;
+ return dma_fence_check_and_signal(f1) ? NULL : f1;
else
- return dma_fence_is_signaled(f2) ? NULL : f2;
+ return dma_fence_check_and_signal(f2) ? NULL : f2;
}
/**
@@ -535,7 +534,7 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
*/
static inline int dma_fence_get_status_locked(struct dma_fence *fence)
{
- if (dma_fence_is_signaled_locked(fence))
+ if (dma_fence_check_and_signal_locked(fence))
return fence->error ?: 1;
else
return 0;
dma_fence_is_signaled()'s name strongly reads as if this function were
intended for checking whether a fence is already signaled. The boolean
it returns hints at that, too. The function's behavior, however, is
more complex: through a driver callback it can check whether the
hardware's sequence number indicates that the fence can already be
treated as signaled, even though the hardware's / driver's interrupt
handler has not signaled it yet. If that is the case, the function
also signals the fence.

(Presumably) this has caused a bug in Nouveau (unknown commit), where
nouveau_fence_done() uses the function to check a fence, which causes
a race.

Give the function a more obvious name.

Signed-off-by: Philipp Stanner <phasta@kernel.org>
---
 drivers/dma-buf/dma-fence-array.c             |  2 +-
 drivers/dma-buf/dma-fence-chain.c             |  6 +++---
 drivers/dma-buf/dma-fence-unwrap.c            |  4 ++--
 drivers/dma-buf/dma-fence.c                   |  6 +++---
 drivers/dma-buf/dma-resv.c                    |  6 +++---
 drivers/dma-buf/st-dma-fence-chain.c          | 10 +++++-----
 drivers/dma-buf/st-dma-fence.c                |  8 ++++----
 drivers/dma-buf/sw_sync.c                     |  2 +-
 drivers/dma-buf/sync_file.c                   |  4 ++--
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c  |  2 +-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  |  8 ++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c     |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c       |  8 ++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c      |  6 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c      | 10 +++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c        |  6 +++---
 drivers/gpu/drm/amd/amdkfd/kfd_device.c       |  2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c          |  2 +-
 drivers/gpu/drm/drm_suballoc.c                |  6 +++---
 drivers/gpu/drm/drm_syncobj.c                 |  6 +++---
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c         |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_sched.c       |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_wait.c      |  4 ++--
 .../drm/i915/gem/selftests/i915_gem_migrate.c |  2 +-
 drivers/gpu/drm/i915/i915_deps.c              |  6 +++---
 drivers/gpu/drm/i915/i915_request.c           |  6 +++---
 drivers/gpu/drm/i915/i915_sw_fence.c          |  4 ++--
 drivers/gpu/drm/i915/i915_vma.c               |  2 +-
 drivers/gpu/drm/i915/selftests/i915_request.c |  4 ++--
 drivers/gpu/drm/imagination/pvr_queue.c       | 10 +++++-----
 drivers/gpu/drm/lima/lima_sched.c             |  4 ++--
 drivers/gpu/drm/msm/msm_gpu.c                 |  2 +-
 drivers/gpu/drm/nouveau/nouveau_drm.c         |  2 +-
 drivers/gpu/drm/nouveau/nouveau_fence.c       |  2 +-
 drivers/gpu/drm/panfrost/panfrost_job.c       |  4 ++--
 drivers/gpu/drm/qxl/qxl_release.c             |  2 +-
 drivers/gpu/drm/scheduler/sched_entity.c      |  2 +-
 drivers/gpu/drm/scheduler/sched_main.c        |  4 ++--
 drivers/gpu/drm/ttm/ttm_bo.c                  |  2 +-
 drivers/gpu/drm/v3d/v3d_sched.c               |  4 ++--
 drivers/gpu/drm/vgem/vgem_fence.c             |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c         |  6 +++---
 drivers/gpu/drm/xe/xe_bo.c                    |  2 +-
 drivers/gpu/drm/xe/xe_guc_submit.c            |  4 ++--
 drivers/gpu/drm/xe/xe_hw_fence.c              |  2 +-
 drivers/gpu/drm/xe/xe_pt.c                    |  2 +-
 drivers/gpu/drm/xe/xe_range_fence.c           |  2 +-
 include/linux/dma-fence.h                     | 21 ++++++++++-----------
 51 files changed, 113 insertions(+), 114 deletions(-)
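
For reference, the behavior that motivates the rename boils down to roughly
the following sketch. This is only an illustration modelled on the inline
helper in include/linux/dma-fence.h touched above, not the verbatim kernel
code, and the function name here is made up; the _locked variant does the
same thing with &dma_fence.lock held:

#include <linux/dma-fence.h>

/*
 * Illustrative sketch (hypothetical name): despite the old name, the
 * helper does more than test a flag. If the fence is not yet flagged
 * as signaled, it asks the driver's ->signaled() callback whether the
 * hardware has completed the fence, and if so it signals the fence
 * itself as a side effect, which is what the new name spells out.
 */
static inline bool example_dma_fence_check_and_signal(struct dma_fence *fence)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return true;	/* already signaled, nothing left to do */

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		dma_fence_signal(fence);	/* the side effect the old name hid */
		return true;
	}

	return false;
}

A caller that only wants to peek at the signaled state, without triggering
that side effect, would presumably test DMA_FENCE_FLAG_SIGNALED_BIT directly
instead.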