@@ -124,7 +124,8 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
}
seq_printf(m, "Inactive Objects:\n");
- msm_gem_describe_objects(&priv->inactive_list, m);
+ msm_gem_describe_objects(&priv->inactive_dontneed, m);
+ msm_gem_describe_objects(&priv->inactive_willneed, m);
mutex_unlock(&priv->mm_lock);
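
For context, both calls above go through the same per-list dump helper. A minimal sketch of its shape (an illustrative reconstruction of msm_gem_describe_objects() from msm_gem.c, not the verbatim upstream body):

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	/* walk one inactive list and describe each bo on it */
	list_for_each_entry(msm_obj, list, mm_list) {
		msm_gem_describe(&msm_obj->base, m);
		count++;
		size += msm_obj->base.size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}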
@@ -465,7 +465,8 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0);
- INIT_LIST_HEAD(&priv->inactive_list);
+ INIT_LIST_HEAD(&priv->inactive_willneed);
+ INIT_LIST_HEAD(&priv->inactive_dontneed);
mutex_init(&priv->mm_lock);
/* Teach lockdep about lock ordering wrt. shrinker: */
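
The lockdep hint referenced by the comment above follows the usual pattern of faking a reclaim context and asserting that mm_lock could be taken inside it; a sketch, using the stock fs_reclaim_acquire()/fs_reclaim_release() and might_lock() primitives:

	/* Pretend we are in reclaim, then tell lockdep mm_lock may be
	 * taken there, so any mm_lock-under-reclaim inversion is
	 * reported at init time instead of deadlocking in the field.
	 */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->mm_lock);
	fs_reclaim_release(GFP_KERNEL);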
@@ -175,8 +175,9 @@ struct msm_drm_private {
struct msm_perf_state *perf;
/*
- * List of inactive GEM objects. Every bo is either in the inactive_list
- * or gpu->active_list (for the gpu it is active on[1])
+ * Lists of inactive GEM objects. Every bo is either in one of the
+ * inactive lists (depending on whether or not it is shrinkable) or
+ * gpu->active_list (for the gpu it is active on[1])
*
* These lists are protected by mm_lock. If struct_mutex is involved, it
     * should be acquired prior to mm_lock. One should *not* hold mm_lock in
@@ -185,7 +186,8 @@ struct msm_drm_private {
* [1] if someone ever added support for the old 2d cores, there could be
* more than one gpu object
*/
- struct list_head inactive_list;
+ struct list_head inactive_willneed; /* inactive + !shrinkable */
+ struct list_head inactive_dontneed; /* inactive + shrinkable */
struct mutex mm_lock;
struct workqueue_struct *wq;
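
The ordering rule the comment spells out, as an illustrative caller (not code from this patch): struct_mutex, when it is involved at all, is the outer lock, and mm_lock is the inner one, held only across short list manipulation:

	mutex_lock(&dev->struct_mutex);		/* outer, only if needed */
	mutex_lock(&priv->mm_lock);		/* inner, short critical section */
	list_move_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
	mutex_unlock(&priv->mm_lock);
	mutex_unlock(&dev->struct_mutex);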
@@ -17,6 +17,7 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
+static void update_inactive(struct msm_gem_object *msm_obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
@@ -678,6 +679,12 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
madv = msm_obj->madv;
+ /* If the obj is inactive, we might need to move it
+ * between inactive lists
+ */
+ if (msm_obj->active_count == 0)
+ update_inactive(msm_obj);
+
msm_gem_unlock(obj);
return (madv != __MSM_MADV_PURGED);
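
This path is driven from userspace via the MSM_GEM_MADVISE ioctl. A hedged usage sketch, where fd and bo_handle are hypothetical and the struct/ioctl names come from include/uapi/drm/msm_drm.h:

	struct drm_msm_gem_madvise req = {
		.handle = bo_handle,		/* hypothetical GEM handle */
		.madv = MSM_MADV_DONTNEED,	/* make the bo shrinker-visible */
	};

	if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req) == 0 && !req.retained) {
		/* backing pages were already purged; re-fill before reuse */
	}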
@@ -781,19 +788,31 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
void msm_gem_active_put(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
might_sleep();
WARN_ON(!msm_gem_is_locked(obj));
if (--msm_obj->active_count == 0) {
- mutex_lock(&priv->mm_lock);
- list_del_init(&msm_obj->mm_list);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
- mutex_unlock(&priv->mm_lock);
+ update_inactive(msm_obj);
}
}
+static void update_inactive(struct msm_gem_object *msm_obj)
+{
+ struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+ mutex_lock(&priv->mm_lock);
+ WARN_ON(msm_obj->active_count != 0);
+
+ list_del_init(&msm_obj->mm_list);
+ if (msm_obj->madv == MSM_MADV_WILLNEED)
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+ else
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
+
+ mutex_unlock(&priv->mm_lock);
+}
+
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
bool write = !!(op & MSM_PREP_WRITE);
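
Caller-side expectations, as a sketch (not the literal retire path): the object lock must be held, matching the WARN_ON() in msm_gem_active_put(), and the final put is what routes the bo onto the correct inactive list:

	msm_gem_lock(obj);
	msm_gem_active_put(obj);	/* at count==0, update_inactive() picks the list */
	msm_gem_unlock(obj);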
@@ -1099,7 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
}
mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ /* Initially obj is idle, obj->madv == WILLNEED: */
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
mutex_unlock(&priv->mm_lock);
return obj;
@@ -1169,7 +1189,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
msm_gem_unlock(obj);
mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
mutex_unlock(&priv->mm_lock);
return obj;
@@ -19,7 +19,7 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
mutex_lock(&priv->mm_lock);
- list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
if (!msm_gem_trylock(&msm_obj->base))
continue;
if (is_purgeable(msm_obj))
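
Since the count side now only walks inactive_dontneed, every object it sees already has madv == MSM_MADV_DONTNEED; is_purgeable() still guards the remaining conditions. An approximation of the msm_gem.h helper (not the verbatim definition):

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	/* purgeable: userspace said DONTNEED, backing pages exist, and
	 * the bo is not shared via dma-buf (pages cannot be yanked out
	 * from under an importer)
	 */
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
		!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}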
@@ -42,7 +42,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
mutex_lock(&priv->mm_lock);
- list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
if (freed >= sc->nr_to_scan)
break;
if (!msm_gem_trylock(&msm_obj->base))
@@ -96,7 +96,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
struct list_head *mm_lists[] = {
- &priv->inactive_list,
+ &priv->inactive_dontneed,
+ &priv->inactive_willneed,
priv->gpu ? &priv->gpu->active_list : NULL,
NULL,
};
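
The NULL-terminated mm_lists array is then walked with a nested loop along these lines (a sketch of the loop that follows in msm_gem_shrinker_vmap(); vmap_shrink() and vmap_shrink_limit are the per-object helper and budget assumed from the surrounding file):

	struct list_head **lists;
	unsigned unmapped = 0;

	for (lists = mm_lists; *lists; lists++) {
		list_for_each_entry(msm_obj, *lists, mm_list) {
			/* stop once enough vmap address space is reclaimed */
			if (unmapped >= vmap_shrink_limit)
				break;
			unmapped += vmap_shrink(msm_obj);
		}
	}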