drm/i915: Report all objects with allocated pages to the shrinker
Currently, we try to report to the shrinker the precise number of objects (pages) that are available to be reaped at this moment. This requires searching all objects with allocated pages to see if they fulfill the search criteria, and this count is performed quite frequently. (The shrinker tries to free ~128 pages on each invocation, before which we count all the objects; counting takes longer than unbinding the objects!)

If we take the pragmatic view that with sufficient desire, all objects are eventually reapable (they become inactive, or no longer used as framebuffer etc.), we can simply return the count of pinned pages maintained during get_pages/put_pages rather than walk the lists every time.

The downside is that we may (slightly) over-report the number of objects/pages we could shrink and so penalize ourselves by shrinking more than required. This is mitigated by keeping the order in which we shrink objects such that we avoid penalizing active and frequently used objects, and if memory is so tight that we need to free them we would need to anyway.

v2: Only expose shrinkable objects to the shrinker; a small reduction in not considering stolen and foreign objects.
v3: Restore the tracking from a "backup" copy from before the gem/ split

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190530203500.26272-2-chris@chris-wilson.co.uk
This commit is contained in:
@@ -110,7 +110,8 @@ static void __i915_vma_retire(struct i915_active *ref)
|
||||
* so that we don't steal from recently used but inactive objects
|
||||
* (unless we are forced to ofc!)
|
||||
*/
|
||||
obj_bump_mru(obj);
|
||||
if (i915_gem_object_is_shrinkable(obj))
|
||||
obj_bump_mru(obj);
|
||||
|
||||
i915_gem_object_put(obj); /* and drop the active reference */
|
||||
}
|
||||
@@ -677,11 +678,14 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
spin_lock(&dev_priv->mm.obj_lock);
|
||||
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
|
||||
obj->bind_count++;
|
||||
spin_unlock(&dev_priv->mm.obj_lock);
|
||||
|
||||
if (i915_gem_object_is_shrinkable(obj))
|
||||
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
|
||||
|
||||
obj->bind_count++;
|
||||
assert_bind_count(obj);
|
||||
|
||||
spin_unlock(&dev_priv->mm.obj_lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -717,9 +721,13 @@ i915_vma_remove(struct i915_vma *vma)
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
spin_lock(&i915->mm.obj_lock);
|
||||
|
||||
GEM_BUG_ON(obj->bind_count == 0);
|
||||
if (--obj->bind_count == 0 &&
|
||||
i915_gem_object_is_shrinkable(obj) &&
|
||||
obj->mm.madv == I915_MADV_WILLNEED)
|
||||
list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
|
||||
|
||||
spin_unlock(&i915->mm.obj_lock);
|
||||
|
||||
/*
|
||||
|
Reference in new issue
Block user