@@ -206,7 +206,6 @@ static int frontbuffer_active(struct i915_active *ref)
return 0;
}
-__i915_active_call
static void frontbuffer_retire(struct i915_active *ref)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), write);
@@ -254,8 +253,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
kref_init(&front->ref);
atomic_set(&front->bits, 0);
i915_active_init(&front->write,
- frontbuffer_active,
- i915_active_may_sleep(frontbuffer_retire));
+ frontbuffer_active, frontbuffer_retire);
spin_lock(&i915->fb_tracking.lock);
if (rcu_access_pointer(obj->frontbuffer)) {
@@ -1057,7 +1057,6 @@ struct context_barrier_task {
void *data;
};
-__i915_active_call
static void cb_retire(struct i915_active *base)
{
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@@ -222,7 +222,6 @@ static void __ring_retire(struct intel_ring *ring)
i915_active_release(&ring->vma->active);
}
-__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -61,7 +61,6 @@ static int pool_active(struct i915_active *ref)
return 0;
}
-__i915_active_call
static void pool_retire(struct i915_active *ref)
{
struct intel_engine_pool_node *node =
container_of(ref, typeof(*node), active);
@@ -131,7 +131,6 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
kfree_rcu(cl, rcu);
}
-__i915_active_call
static void __cacheline_retire(struct i915_active *active)
{
struct intel_timeline_cacheline *cl =
container_of(active, typeof(*cl), active);
@@ -148,8 +148,15 @@ __active_retire(struct i915_active *ref)
spin_unlock_irqrestore(&ref->tree_lock, flags);
/* After the final retire, the entire struct may be freed */
- if (ref->retire)
- ref->retire(ref);
+ if (ref->retire) {
+ if (ref->active) {
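+ /* Serialise the retire callback against a concurrent active() */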
+ mutex_lock(&ref->callback_lock);
+ ref->retire(ref);
+ mutex_unlock(&ref->callback_lock);
+ } else {
+ ref->retire(ref);
+ }
+ }
/* ... except if you wait on it, you must manage your own references! */
wake_up_var(ref);
@@ -281,15 +288,15 @@ void __i915_active_init(struct i915_active *ref,
struct lock_class_key *mkey,
struct lock_class_key *wkey)
{
- unsigned long bits;
-
debug_active_init(ref);
ref->flags = 0;
ref->active = active;
- ref->retire = ptr_unpack_bits(retire, &bits, 2);
- if (bits & I915_ACTIVE_MAY_SLEEP)
+ ref->retire = retire;
+ if (ref->active && ref->retire) {
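+ /* retire() will take callback_lock, so it may now sleep */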
+ mutex_init(&ref->callback_lock);
ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+ }
spin_lock_init(&ref->tree_lock);
ref->tree = RB_ROOT;
@@ -428,8 +435,15 @@ int i915_active_acquire(struct i915_active *ref)
return err;
if (likely(!i915_active_acquire_if_busy(ref))) {
- if (ref->active)
- err = ref->active(ref);
+ if (ref->active) {
+ if (ref->retire) {
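+ /* Exclude a concurrent retire callback while calling active() */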
+ mutex_lock(&ref->callback_lock);
+ err = ref->active(ref);
+ mutex_unlock(&ref->callback_lock);
+ } else {
+ err = ref->active(ref);
+ }
+ }
if (!err) {
spin_lock_irq(&ref->tree_lock); /* __active_retire() */
debug_active_activate(ref);
@@ -24,11 +24,6 @@ struct i915_active_fence {
struct active_node;
-#define I915_ACTIVE_MAY_SLEEP BIT(0)
-
-#define __i915_active_call __aligned(4)
-#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
-
struct i915_active {
atomic_t count;
struct mutex mutex;
@@ -45,6 +40,7 @@ struct i915_active {
int (*active)(struct i915_active *ref);
void (*retire)(struct i915_active *ref);
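+ /* serialises the active() and retire() callbacks against each other */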
+ struct mutex callback_lock;
struct work_struct work;
@@ -93,7 +93,6 @@ static int __i915_vma_active(struct i915_active *ref)
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
-__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));