@@ -147,8 +147,22 @@ __active_retire(struct i915_active *ref)
 	spin_unlock_irqrestore(&ref->tree_lock, flags);
 
 	/* After the final retire, the entire struct may be freed */
-	if (ref->retire)
-		ref->retire(ref);
+	if (ref->retire) {
+		if (ref->active) {
+			bool freed = false;
+
+			/* Don't race with the active callback, and avoid UaF */
+			down_write(&ref->rwsem);
+			ref->freed = &freed;
+			ref->retire(ref);
+			if (!freed) {
+				ref->freed = NULL;
+				up_write(&ref->rwsem);
+			}
+		} else {
+			ref->retire(ref);
+		}
+	}
 
 	/* ... except if you wait on it, you must manage your own references! */
 	wake_up_var(ref);
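Note on the handshake above: when the retire callback frees the object that embeds the i915_active, the free is reported through i915_active_fini(), which sets *ref->freed = true and releases the write side of ref->rwsem, so __active_retire() knows not to touch the struct again. A minimal sketch, assuming a hypothetical struct foo container and retire callback (not part of the patch):

/* Hypothetical container, for illustration only. */
struct foo {
	struct i915_active active;
	/* ... */
};

static void foo_retire(struct i915_active *ref)
{
	struct foo *foo = container_of(ref, struct foo, active);

	/*
	 * i915_active_fini() sets *ref->freed = true and drops the write
	 * side of ref->rwsem, so __active_retire() knows the object is gone.
	 */
	i915_active_fini(&foo->active);
	kfree(foo);
}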
@@ -278,7 +292,8 @@ void __i915_active_init(struct i915_active *ref,
 			int (*active)(struct i915_active *ref),
 			void (*retire)(struct i915_active *ref),
 			struct lock_class_key *mkey,
-			struct lock_class_key *wkey)
+			struct lock_class_key *wkey,
+			struct lock_class_key *rkey)
 {
 	unsigned long bits;
 
@@ -287,8 +302,13 @@ void __i915_active_init(struct i915_active *ref,
 	ref->flags = 0;
 	ref->active = active;
 	ref->retire = ptr_unpack_bits(retire, &bits, 2);
-	if (bits & I915_ACTIVE_MAY_SLEEP)
+	ref->freed = NULL;
+	if (ref->active && ref->retire) {
+		__init_rwsem(&ref->rwsem, "i915_active.rwsem", rkey);
 		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+	} else if (bits & I915_ACTIVE_MAY_SLEEP) {
+		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+	}
 
 	spin_lock_init(&ref->tree_lock);
 	ref->tree = RB_ROOT;
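Since __active_retire() now takes ref->rwsem, a sleeping lock, whenever both callbacks are present, the init path forces I915_ACTIVE_RETIRE_SLEEPS in that case even if the caller did not mark the retire callback with i915_active_may_sleep(). The resulting condition is equivalent to the sketch below (the helper is illustrative only, not part of the patch):

/* Illustrative only: when does retire end up flagged as able to sleep? */
static bool retire_may_sleep(const struct i915_active *ref, unsigned long bits)
{
	return (ref->active && ref->retire) || (bits & I915_ACTIVE_MAY_SLEEP);
}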
@@ -417,8 +437,20 @@ int i915_active_acquire(struct i915_active *ref)
 		return err;
 
 	if (likely(!i915_active_acquire_if_busy(ref))) {
-		if (ref->active)
-			err = ref->active(ref);
+		if (ref->active) {
+			if (ref->retire) {
+				/*
+				 * This can be a recursive call, and the mutex
+				 * above already protects from concurrent active
+				 * callbacks, so a read lock fits best.
+				 */
+				down_read(&ref->rwsem);
+				err = ref->active(ref);
+				up_read(&ref->rwsem);
+			} else {
+				err = ref->active(ref);
+			}
+		}
 		if (!err) {
 			spin_lock_irq(&ref->tree_lock); /* __active_retire() */
 			debug_active_activate(ref);
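With this split, ref->active() always runs under the read side of ref->rwsem while __active_retire() holds the write side, so an in-flight active callback cannot overlap a retire that might free the object. A hypothetical callback for illustration (struct foo and foo_acquire_resources are assumptions, not existing code):

/*
 * Runs with ref->rwsem held for read, so it may sleep; concurrent
 * acquirers are already serialised by ref->mutex in the caller.
 */
static int foo_active(struct i915_active *ref)
{
	struct foo *foo = container_of(ref, struct foo, active);

	return foo_acquire_resources(foo);
}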
@@ -502,16 +534,20 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 	return err;
 }
 
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 void i915_active_fini(struct i915_active *ref)
 {
+	if (ref->freed) {
+		*ref->freed = true;
+		up_write(&ref->rwsem);
+	}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 	debug_active_fini(ref);
 	GEM_BUG_ON(atomic_read(&ref->count));
 	GEM_BUG_ON(work_pending(&ref->work));
 	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 	mutex_destroy(&ref->mutex);
-}
 #endif
+}
 
 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
 {
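i915_active_fini() is now built in all configurations because the freed handshake must run even without CONFIG_DRM_I915_DEBUG_GEM; only the sanity checks stay under the ifdef. Teardown outside the retire path is unaffected, as in this sketch (foo_destroy is hypothetical):

static void foo_destroy(struct foo *foo)
{
	/*
	 * No retire is in flight here, so ref->freed is still NULL and the
	 * handshake branch above is skipped.
	 */
	i915_active_fini(&foo->active);
	kfree(foo);
}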
@@ -153,14 +153,16 @@ void __i915_active_init(struct i915_active *ref,
 			int (*active)(struct i915_active *ref),
 			void (*retire)(struct i915_active *ref),
 			struct lock_class_key *mkey,
-			struct lock_class_key *wkey);
+			struct lock_class_key *wkey,
+			struct lock_class_key *rkey);
 
 /* Specialise each class of i915_active to avoid impossible lockdep cycles. */
 #define i915_active_init(ref, active, retire) do {			\
 	static struct lock_class_key __mkey;				\
 	static struct lock_class_key __wkey;				\
+	static struct lock_class_key __rkey;				\
 									\
-	__i915_active_init(ref, active, retire, &__mkey, &__wkey);	\
+	__i915_active_init(ref, active, retire, &__mkey, &__wkey, &__rkey); \
 } while (0)
 
 int i915_active_ref(struct i915_active *ref,
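Call sites keep the same three-argument form; the macro just materialises one more static lock class key so each i915_active class gets its own lockdep class for the rwsem. A usage sketch reusing the hypothetical callbacks from above:

static struct foo *foo_create(void)
{
	struct foo *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	/* __mkey/__wkey/__rkey are expanded as statics at this call site. */
	i915_active_init(&foo->active, foo_active, foo_retire);
	return foo;
}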
@@ -200,11 +202,7 @@ i915_active_is_idle(const struct i915_active *ref)
 	return !atomic_read(&ref->count);
 }
 
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 void i915_active_fini(struct i915_active *ref);
-#else
-static inline void i915_active_fini(struct i915_active *ref) { }
-#endif
 
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine);
@@ -32,6 +32,8 @@ struct active_node;
 struct i915_active {
 	atomic_t count;
 	struct mutex mutex;
+	struct rw_semaphore rwsem;
+	bool *freed;
 
 	spinlock_t tree_lock;
 	struct active_node *cache;