drm/i915: Flush pages on acquisition
When we return pages to the system, we ensure that they are marked as being in the CPU domain since any external access is uncontrolled and we must assume the worst. This means that we need to always flush the pages on acquisition if we need to use them on the GPU, and from the beginning have used set-domain. Set-domain is overkill for the purpose as it is a general synchronisation barrier, but our intent is to only flush the pages being swapped in. If we move that flush into the pages acquisition phase, we know then that when we have obj->mm.pages, they are coherent with the GPU and need only maintain that status without resorting to heavy handed use of set-domain. The principal knock-on effect for userspace is through mmap-gtt pagefaulting. Our uAPI has always implied that the GTT mmap was async (especially as when any pagefault occurs is unpredictable to userspace) and so userspace had to apply explicit domain control itself (set-domain). However, swapping is transparent to the kernel, and so on first fault we need to acquire the pages and make them coherent for access through the GTT. Our use of set-domain here leaks into the uABI that the first pagefault was synchronous. This is unintentional and barring a few igt should be unnoticed, nevertheless we bump the uABI version for mmap-gtt to reflect the change in behaviour. Another implication of the change is that gem_create() is presumed to create an object that is coherent with the CPU and is in the CPU write domain, so a set-domain(CPU) following a gem_create() would be a minor operation that merely checked whether we could allocate all pages for the object. On applying this change, a set-domain(CPU) causes a clflush as we acquire the pages. 
This will have a small impact on mesa as we move the clflush here on !llc from execbuf time to create, but that should have minimal performance impact as the same clflush exists but is now done early and because of the clflush issue, userspace recycles bo and so should resist allocating fresh objects. Internally, the presumption that objects are created in the CPU write-domain and remain so through writes to obj->mm.mapping is more prevalent than I expected; but easy enough to catch and apply a manual flush. For the future, we should push the page flush from the central set_pages() into the callers so that we can more finely control when it is applied, but for now doing it in one location is easier to validate, at the cost of sometimes flushing when there is no need. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Matthew Auld <matthew.william.auld@gmail.com> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> Cc: Antonio Argenziano <antonio.argenziano@intel.com> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com> Link: https://patchwork.freedesktop.org/patch/msgid/20190321161908.8007-1-chris@chris-wilson.co.uk
This commit is contained in:
@@ -1713,6 +1713,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
|
||||
* 2 - Recognise WC as a separate cache domain so that we can flush the
|
||||
* delayed writes via GTT before performing direct access via WC.
|
||||
*
|
||||
* 3 - Remove implicit set-domain(GTT) and synchronisation on initial
|
||||
* pagefault; swapin remains transparent.
|
||||
*
|
||||
* Restrictions:
|
||||
*
|
||||
* * snoopable objects cannot be accessed via the GTT. It can cause machine
|
||||
@@ -1740,7 +1743,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
|
||||
*/
|
||||
/*
 * i915_gem_mmap_gtt_version - report the mmap-gtt uABI version to userspace.
 *
 * Version 3: the implicit set-domain(GTT) and synchronisation on the initial
 * pagefault are removed; swapin remains transparent to userspace (pages are
 * now flushed for GPU coherency when acquired, not via set-domain at fault).
 *
 * The mangled diff rendering left both the old `return 2;` and the new
 * `return 3;` in the body (the former making the latter unreachable); the
 * post-patch function returns 3.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 3;
}
|
||||
|
||||
static inline struct i915_ggtt_view
|
||||
@@ -1808,17 +1811,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
|
||||
|
||||
trace_i915_gem_object_fault(obj, page_offset, true, write);
|
||||
|
||||
/* Try to flush the object off the GPU first without holding the lock.
|
||||
* Upon acquiring the lock, we will perform our sanity checks and then
|
||||
* repeat the flush holding the lock in the normal manner to catch cases
|
||||
* where we are gazumped.
|
||||
*/
|
||||
ret = i915_gem_object_wait(obj,
|
||||
I915_WAIT_INTERRUPTIBLE,
|
||||
MAX_SCHEDULE_TIMEOUT);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = i915_gem_object_pin_pages(obj);
|
||||
if (ret)
|
||||
goto err;
|
||||
@@ -1874,10 +1866,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_set_to_gtt_domain(obj, write);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
|
||||
ret = i915_vma_pin_fence(vma);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
@@ -2534,6 +2522,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
|
||||
|
||||
lockdep_assert_held(&obj->mm.lock);
|
||||
|
||||
/* Make the pages coherent with the GPU (flushing any swapin). */
|
||||
if (obj->cache_dirty) {
|
||||
obj->write_domain = 0;
|
||||
if (i915_gem_object_has_struct_page(obj))
|
||||
drm_clflush_sg(pages);
|
||||
obj->cache_dirty = false;
|
||||
}
|
||||
|
||||
obj->mm.get_page.sg_pos = pages->sgl;
|
||||
obj->mm.get_page.sg_idx = 0;
|
||||
|
||||
@@ -2735,6 +2731,33 @@ err_unlock:
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
 * __i915_gem_object_flush_map - flush CPU writes made through obj->mm.mapping
 * back to memory for the byte range [offset, offset + size).
 *
 * @obj:    object whose pinned kernel mapping was written through
 * @offset: byte offset of the dirtied range within the mapping
 * @size:   length in bytes of the dirtied range
 *
 * Caller must hold pinned pages (asserted below). Marks the pages dirty
 * unconditionally; the clflush is skipped when the object is cache-coherent
 * for writes, or when the mapping is WC (writes bypass the CPU cache).
 * Only when the entire object was flushed do we clear the CPU write-domain
 * and cache_dirty tracking — a partial flush cannot prove the rest is clean.
 */
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	/* Reject ranges that overflow or extend past the object. */
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	/* HW snoops CPU writes; nothing to flush. */
	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	/* The map type is packed into the low bits of the mapping pointer. */
	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		/* Whole object flushed: CPU writes are fully pushed out. */
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}
|
||||
|
||||
static int
|
||||
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
|
||||
const struct drm_i915_gem_pwrite *arg)
|
||||
@@ -4692,6 +4715,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
|
||||
goto err_active;
|
||||
|
||||
engine->default_state = i915_gem_object_get(state->obj);
|
||||
i915_gem_object_set_cache_coherency(engine->default_state,
|
||||
I915_CACHE_LLC);
|
||||
|
||||
/* Check we can acquire the image of the context state */
|
||||
vaddr = i915_gem_object_pin_map(engine->default_state,
|
||||
|
Reference in New Issue
Block a user