- /*
- * Copyright © 2008-2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <[email protected]>
- *
- */
- #include <linux/dma-fence-array.h>
- #include <linux/kthread.h>
- #include <linux/dma-resv.h>
- #include <linux/shmem_fs.h>
- #include <linux/slab.h>
- #include <linux/stop_machine.h>
- #include <linux/swap.h>
- #include <linux/pci.h>
- #include <linux/dma-buf.h>
- #include <linux/mman.h>
- #include <drm/drm_cache.h>
- #include <drm/drm_vma_manager.h>
- #include "display/intel_display.h"
- #include "display/intel_frontbuffer.h"
- #include "gem/i915_gem_clflush.h"
- #include "gem/i915_gem_context.h"
- #include "gem/i915_gem_ioctls.h"
- #include "gem/i915_gem_mman.h"
- #include "gem/i915_gem_pm.h"
- #include "gem/i915_gem_region.h"
- #include "gem/i915_gem_userptr.h"
- #include "gt/intel_engine_user.h"
- #include "gt/intel_gt.h"
- #include "gt/intel_gt_pm.h"
- #include "gt/intel_workarounds.h"
- #include "i915_drv.h"
- #include "i915_file_private.h"
- #include "i915_trace.h"
- #include "i915_vgpu.h"
- #include "intel_pm.h"
- static int
- insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
- {
- int err;
- err = mutex_lock_interruptible(&ggtt->vm.mutex);
- if (err)
- return err;
- memset(node, 0, sizeof(*node));
- err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
- size, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- mutex_unlock(&ggtt->vm.mutex);
- return err;
- }
- static void
- remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
- {
- mutex_lock(&ggtt->vm.mutex);
- drm_mm_remove_node(node);
- mutex_unlock(&ggtt->vm.mutex);
- }
- int
- i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
- {
- struct drm_i915_private *i915 = to_i915(dev);
- struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- struct drm_i915_gem_get_aperture *args = data;
- struct i915_vma *vma;
- u64 pinned;
- if (mutex_lock_interruptible(&ggtt->vm.mutex))
- return -EINTR;
- pinned = ggtt->vm.reserved;
- list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
- if (i915_vma_is_pinned(vma))
- pinned += vma->node.size;
- mutex_unlock(&ggtt->vm.mutex);
- args->aper_size = ggtt->vm.total;
- args->aper_available_size = args->aper_size - pinned;
- return 0;
- }
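- /*
- * i915_gem_object_unbind - unbind all bound vmas of an object.
- *
- * Walks obj->vma.list and unbinds every bound vma, honouring the
- * I915_GEM_OBJECT_UNBIND_* flags (TEST, ASYNC, ACTIVE, BARRIER and
- * VM_TRYLOCK). Returns 0 on success or a negative error code.
- */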
- int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
- unsigned long flags)
- {
- struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
- bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
- LIST_HEAD(still_in_list);
- intel_wakeref_t wakeref;
- struct i915_vma *vma;
- int ret;
- assert_object_held(obj);
- if (list_empty(&obj->vma.list))
- return 0;
- /*
- * As some machines use ACPI to handle runtime-resume callbacks, and
- * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
- * as they are required by the shrinker. Ergo, we wake the device up
- * first just in case.
- */
- wakeref = intel_runtime_pm_get(rpm);
- try_again:
- ret = 0;
- spin_lock(&obj->vma.lock);
- while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
- struct i915_vma,
- obj_link))) {
- list_move_tail(&vma->obj_link, &still_in_list);
- if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
- continue;
- if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
- ret = -EBUSY;
- break;
- }
- /*
- * Requiring the vm destructor to take the object lock
- * before destroying a vma would help us eliminate the
- * i915_vm_tryget() here, AND thus also the barrier stuff
- * at the end. That's an easy fix, but sleeping locks in
- * a kthread should generally be avoided.
- */
- ret = -EAGAIN;
- if (!i915_vm_tryget(vma->vm))
- break;
- spin_unlock(&obj->vma.lock);
- /*
- * Since i915_vma_parked() takes the object lock
- * before vma destruction, it won't race us here,
- * and destroy the vma from under us.
- */
- ret = -EBUSY;
- if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
- assert_object_held(vma->obj);
- ret = i915_vma_unbind_async(vma, vm_trylock);
- }
- if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma))) {
- if (vm_trylock) {
- if (mutex_trylock(&vma->vm->mutex)) {
- ret = __i915_vma_unbind(vma);
- mutex_unlock(&vma->vm->mutex);
- }
- } else {
- ret = i915_vma_unbind(vma);
- }
- }
- i915_vm_put(vma->vm);
- spin_lock(&obj->vma.lock);
- }
- list_splice_init(&still_in_list, &obj->vma.list);
- spin_unlock(&obj->vma.lock);
- if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
- rcu_barrier(); /* flush the i915_vm_release() */
- goto try_again;
- }
- intel_runtime_pm_put(rpm, wakeref);
- return ret;
- }
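- /* Per-page copy function for the shmem pread fastpath.
- * Flushes invalid cachelines before reading from the page if
- * needs_clflush is set.
- */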
- static int
- shmem_pread(struct page *page, int offset, int len, char __user *user_data,
- bool needs_clflush)
- {
- char *vaddr;
- int ret;
- vaddr = kmap(page);
- if (needs_clflush)
- drm_clflush_virt_range(vaddr + offset, len);
- ret = __copy_to_user(user_data, vaddr + offset, len);
- kunmap(page);
- return ret ? -EFAULT : 0;
- }
- static int
- i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args)
- {
- unsigned int needs_clflush;
- unsigned int idx, offset;
- char __user *user_data;
- u64 remain;
- int ret;
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (ret)
- return ret;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err_unlock;
- ret = i915_gem_object_prepare_read(obj, &needs_clflush);
- if (ret)
- goto err_unpin;
- i915_gem_object_finish_access(obj);
- i915_gem_object_unlock(obj);
- remain = args->size;
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = offset_in_page(args->offset);
- for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
- struct page *page = i915_gem_object_get_page(obj, idx);
- unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
- ret = shmem_pread(page, offset, length, user_data,
- needs_clflush);
- if (ret)
- break;
- remain -= length;
- user_data += length;
- offset = 0;
- }
- i915_gem_object_unpin_pages(obj);
- return ret;
- err_unpin:
- i915_gem_object_unpin_pages(obj);
- err_unlock:
- i915_gem_object_unlock(obj);
- return ret;
- }
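- /* Copy data from the GGTT aperture out to userspace. Try a non-faulting
- * atomic WC mapping first; if any bytes are left uncopied, retry through
- * a regular mapping that may fault in the user pages. Returns true if
- * the copy still failed.
- */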
- static inline bool
- gtt_user_read(struct io_mapping *mapping,
- loff_t base, int offset,
- char __user *user_data, int length)
- {
- void __iomem *vaddr;
- unsigned long unwritten;
- /* We can use the cpu mem copy function because this is X86. */
- vaddr = io_mapping_map_atomic_wc(mapping, base);
- unwritten = __copy_to_user_inatomic(user_data,
- (void __force *)vaddr + offset,
- length);
- io_mapping_unmap_atomic(vaddr);
- if (unwritten) {
- vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
- unwritten = copy_to_user(user_data,
- (void __force *)vaddr + offset,
- length);
- io_mapping_unmap(vaddr);
- }
- return unwritten;
- }
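- /* Prepare GGTT access for a pread/pwrite: move the object to the GTT
- * domain and, for untiled objects, try to pin the whole object into the
- * mappable aperture. If that is not possible, reserve a single-page
- * node so pages can be inserted into the aperture one at a time. The
- * backing pages are pinned in either case. Returns the pinned vma, NULL
- * when falling back to the page-by-page node, or an ERR_PTR on failure.
- */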
- static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
- struct drm_mm_node *node,
- bool write)
- {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- struct i915_vma *vma;
- struct i915_gem_ww_ctx ww;
- int ret;
- i915_gem_ww_ctx_init(&ww, true);
- retry:
- vma = ERR_PTR(-ENODEV);
- ret = i915_gem_object_lock(obj, &ww);
- if (ret)
- goto err_ww;
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto err_ww;
- if (!i915_gem_object_is_tiled(obj))
- vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
- if (vma == ERR_PTR(-EDEADLK)) {
- ret = -EDEADLK;
- goto err_ww;
- } else if (!IS_ERR(vma)) {
- node->start = i915_ggtt_offset(vma);
- node->flags = 0;
- } else {
- ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
- if (ret)
- goto err_ww;
- GEM_BUG_ON(!drm_mm_node_allocated(node));
- vma = NULL;
- }
- ret = i915_gem_object_pin_pages(obj);
- if (ret) {
- if (drm_mm_node_allocated(node)) {
- ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
- remove_mappable_node(ggtt, node);
- } else {
- i915_vma_unpin(vma);
- }
- }
- err_ww:
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
- return ret ? ERR_PTR(ret) : vma;
- }
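- /* Undo i915_gem_gtt_prepare: unpin the backing pages and release either
- * the temporary aperture node (after clearing its PTEs) or the pinned vma.
- */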
- static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
- struct drm_mm_node *node,
- struct i915_vma *vma)
- {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- i915_gem_object_unpin_pages(obj);
- if (drm_mm_node_allocated(node)) {
- ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
- remove_mappable_node(ggtt, node);
- } else {
- i915_vma_unpin(vma);
- }
- }
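- /* Fallback pread path that reads through the GGTT aperture: map the
- * object (or individual pages) into the mappable range and copy the
- * data out to userspace with the CPU.
- */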
- static int
- i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pread *args)
- {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- intel_wakeref_t wakeref;
- struct drm_mm_node node;
- void __user *user_data;
- struct i915_vma *vma;
- u64 remain, offset;
- int ret = 0;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vma = i915_gem_gtt_prepare(obj, &node, false);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto out_rpm;
- }
- user_data = u64_to_user_ptr(args->data_ptr);
- remain = args->size;
- offset = args->offset;
- while (remain > 0) {
- /* Operation in this page
- *
- * page_base = page offset within aperture
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- u32 page_base = node.start;
- unsigned page_offset = offset_in_page(offset);
- unsigned page_length = PAGE_SIZE - page_offset;
- page_length = remain < page_length ? remain : page_length;
- if (drm_mm_node_allocated(&node)) {
- ggtt->vm.insert_page(&ggtt->vm,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
- } else {
- page_base += offset & PAGE_MASK;
- }
- if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
- user_data, page_length)) {
- ret = -EFAULT;
- break;
- }
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
- i915_gem_gtt_cleanup(obj, &node, vma);
- out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- return ret;
- }
- /**
- * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- *
- * On error, the contents of *data are undefined.
- */
- int
- i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
- {
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_pread *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
- /* PREAD is disallowed for all platforms after TGL-LP. This also
- * covers all platforms with local memory.
- */
- if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
- return -EOPNOTSUPP;
- if (args->size == 0)
- return 0;
- if (!access_ok(u64_to_user_ptr(args->data_ptr),
- args->size))
- return -EFAULT;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
- /* Bounds check source. */
- if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
- ret = -EINVAL;
- goto out;
- }
- trace_i915_gem_object_pread(obj, args->offset, args->size);
- ret = -ENODEV;
- if (obj->ops->pread)
- ret = obj->ops->pread(obj, args);
- if (ret != -ENODEV)
- goto out;
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- goto out;
- ret = i915_gem_shmem_pread(obj, args);
- if (ret == -EFAULT || ret == -ENODEV)
- ret = i915_gem_gtt_pread(obj, args);
- out:
- i915_gem_object_put(obj);
- return ret;
- }
- /* This is the fast write path which cannot handle
- * page faults in the source data
- */
- static inline bool
- ggtt_write(struct io_mapping *mapping,
- loff_t base, int offset,
- char __user *user_data, int length)
- {
- void __iomem *vaddr;
- unsigned long unwritten;
- /* We can use the cpu mem copy function because this is X86. */
- vaddr = io_mapping_map_atomic_wc(mapping, base);
- unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
- user_data, length);
- io_mapping_unmap_atomic(vaddr);
- if (unwritten) {
- vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
- unwritten = copy_from_user((void __force *)vaddr + offset,
- user_data, length);
- io_mapping_unmap(vaddr);
- }
- return unwritten;
- }
- /**
- * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy
- * the data directly from the user into the GTT, uncached.
- * @obj: i915 GEM object
- * @args: pwrite arguments structure
- */
- static int
- i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pwrite *args)
- {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- intel_wakeref_t wakeref;
- struct drm_mm_node node;
- struct i915_vma *vma;
- u64 remain, offset;
- void __user *user_data;
- int ret = 0;
- if (i915_gem_object_has_struct_page(obj)) {
- /*
- * Avoid waking the device up if we can fallback, as
- * waking/resuming is very slow (worst-case 10-100 ms
- * depending on PCI sleeps and our own resume time).
- * This easily dwarfs any performance advantage from
- * using the cache bypass of indirect GGTT access.
- */
- wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (!wakeref)
- return -EFAULT;
- } else {
- /* No backing pages, no fallback, we must force GGTT access */
- wakeref = intel_runtime_pm_get(rpm);
- }
- vma = i915_gem_gtt_prepare(obj, &node, true);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto out_rpm;
- }
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
- user_data = u64_to_user_ptr(args->data_ptr);
- offset = args->offset;
- remain = args->size;
- while (remain) {
- /* Operation in this page
- *
- * page_base = page offset within aperture
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- u32 page_base = node.start;
- unsigned int page_offset = offset_in_page(offset);
- unsigned int page_length = PAGE_SIZE - page_offset;
- page_length = remain < page_length ? remain : page_length;
- if (drm_mm_node_allocated(&node)) {
- /* flush the write before we modify the GGTT */
- intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- ggtt->vm.insert_page(&ggtt->vm,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
- wmb(); /* flush modifications to the GGTT (insert_page) */
- } else {
- page_base += offset & PAGE_MASK;
- }
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- * If the object is non-shmem backed, we retry again with the
- * path that handles page fault.
- */
- if (ggtt_write(&ggtt->iomap, page_base, page_offset,
- user_data, page_length)) {
- ret = -EFAULT;
- break;
- }
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
- intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- i915_gem_gtt_cleanup(obj, &node, vma);
- out_rpm:
- intel_runtime_pm_put(rpm, wakeref);
- return ret;
- }
- /* Per-page copy function for the shmem pwrite fastpath.
- * Flushes invalid cachelines before writing to the target if
- * needs_clflush_before is set and flushes out any written cachelines after
- * writing if needs_clflush_after is set.
- */
- static int
- shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
- bool needs_clflush_before,
- bool needs_clflush_after)
- {
- char *vaddr;
- int ret;
- vaddr = kmap(page);
- if (needs_clflush_before)
- drm_clflush_virt_range(vaddr + offset, len);
- ret = __copy_from_user(vaddr + offset, user_data, len);
- if (!ret && needs_clflush_after)
- drm_clflush_virt_range(vaddr + offset, len);
- kunmap(page);
- return ret ? -EFAULT : 0;
- }
- static int
- i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pwrite *args)
- {
- unsigned int partial_cacheline_write;
- unsigned int needs_clflush;
- unsigned int offset, idx;
- void __user *user_data;
- u64 remain;
- int ret;
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (ret)
- return ret;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err_unlock;
- ret = i915_gem_object_prepare_write(obj, &needs_clflush);
- if (ret)
- goto err_unpin;
- i915_gem_object_finish_access(obj);
- i915_gem_object_unlock(obj);
- /* If we don't overwrite a cacheline completely we need to be
- * careful to have up-to-date data by first clflushing. Don't
- * overcomplicate things and flush the entire write region.
- */
- partial_cacheline_write = 0;
- if (needs_clflush & CLFLUSH_BEFORE)
- partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
- user_data = u64_to_user_ptr(args->data_ptr);
- remain = args->size;
- offset = offset_in_page(args->offset);
- for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
- struct page *page = i915_gem_object_get_page(obj, idx);
- unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
- ret = shmem_pwrite(page, offset, length, user_data,
- (offset | length) & partial_cacheline_write,
- needs_clflush & CLFLUSH_AFTER);
- if (ret)
- break;
- remain -= length;
- user_data += length;
- offset = 0;
- }
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- i915_gem_object_unpin_pages(obj);
- return ret;
- err_unpin:
- i915_gem_object_unpin_pages(obj);
- err_unlock:
- i915_gem_object_unlock(obj);
- return ret;
- }
- /**
- * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
- * @dev: drm device
- * @data: ioctl data blob
- * @file: drm file
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
- int
- i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
- {
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_pwrite *args = data;
- struct drm_i915_gem_object *obj;
- int ret;
- /* PWRITE is disallowed for all platforms after TGL-LP. This also
- * covers all platforms with local memory.
- */
- if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
- return -EOPNOTSUPP;
- if (args->size == 0)
- return 0;
- if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
- return -EFAULT;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
- /* Bounds check destination. */
- if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
- ret = -EINVAL;
- goto err;
- }
- /* Writes not allowed into this read-only object */
- if (i915_gem_object_is_readonly(obj)) {
- ret = -EINVAL;
- goto err;
- }
- trace_i915_gem_object_pwrite(obj, args->offset, args->size);
- ret = -ENODEV;
- if (obj->ops->pwrite)
- ret = obj->ops->pwrite(obj, args);
- if (ret != -ENODEV)
- goto err;
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- goto err;
- ret = -EFAULT;
- /* We can only do the GTT pwrite on untiled buffers, as otherwise
- * it would end up going through the fenced access, and we'll get
- * different detiling behavior between reading and writing.
- * pread/pwrite currently are reading and writing from the CPU
- * perspective, requiring manual detiling by the client.
- */
- if (!i915_gem_object_has_struct_page(obj) ||
- i915_gem_cpu_write_needs_clflush(obj))
- /* Note that the gtt paths might fail with non-page-backed user
- * pointers (e.g. gtt mappings when moving data between
- * textures). Fallback to the shmem path in that case.
- */
- ret = i915_gem_gtt_pwrite_fast(obj, args);
- if (ret == -EFAULT || ret == -ENOSPC) {
- if (i915_gem_object_has_struct_page(obj))
- ret = i915_gem_shmem_pwrite(obj, args);
- }
- err:
- i915_gem_object_put(obj);
- return ret;
- }
- /**
- * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
- * @dev: drm device
- * @data: ioctl data blob
- * @file: drm file
- */
- int
- i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
- {
- struct drm_i915_gem_sw_finish *args = data;
- struct drm_i915_gem_object *obj;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
- /*
- * Proxy objects are barred from CPU access, so there is no
- * need to ban sw_finish as it is a nop.
- */
- /* Pinned buffers may be scanout, so flush the cache */
- i915_gem_object_flush_if_display(obj);
- i915_gem_object_put(obj);
- return 0;
- }
- void i915_gem_runtime_suspend(struct drm_i915_private *i915)
- {
- struct drm_i915_gem_object *obj, *on;
- int i;
- /*
- * Only called during RPM suspend. All users of the userfault_list
- * must be holding an RPM wakeref to ensure that this can not
- * run concurrently with themselves (and use the struct_mutex for
- * protection between themselves).
- */
- list_for_each_entry_safe(obj, on,
- &to_gt(i915)->ggtt->userfault_list, userfault_link)
- __i915_gem_object_release_mmap_gtt(obj);
- list_for_each_entry_safe(obj, on,
- &i915->runtime_pm.lmem_userfault_list, userfault_link)
- i915_gem_object_runtime_pm_release_mmap_offset(obj);
- /*
- * The fences will be lost when the device powers down. If any are
- * in use by hardware (i.e. they are pinned), we should not be powering
- * down! All other fences will be reacquired by the user upon waking.
- */
- for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
- struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];
- /*
- * Ideally we want to assert that the fence register is not
- * live at this point (i.e. that no piece of code will be
- * trying to write through fence + GTT, as that both violates
- * our tracking of activity and associated locking/barriers,
- * but also is illegal given that the hw is powered down).
- *
- * Previously we used reg->pin_count as a "liveness" indicator.
- * That is not sufficient, and we need a more fine-grained
- * tool if we want to have a sanity check here.
- */
- if (!reg->vma)
- continue;
- GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
- reg->dirty = true;
- }
- }
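- /* Remove the vma from the object's rbtree of views so that the next
- * i915_vma_instance() lookup creates a fresh vma instead of reusing
- * this one.
- */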
- static void discard_ggtt_vma(struct i915_vma *vma)
- {
- struct drm_i915_gem_object *obj = vma->obj;
- spin_lock(&obj->vma.lock);
- if (!RB_EMPTY_NODE(&vma->obj_node)) {
- rb_erase(&vma->obj_node, &obj->vma.tree);
- RB_CLEAR_NODE(&vma->obj_node);
- }
- spin_unlock(&obj->vma.lock);
- }
- struct i915_vma *
- i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
- struct i915_gem_ww_ctx *ww,
- const struct i915_gtt_view *view,
- u64 size, u64 alignment, u64 flags)
- {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- struct i915_vma *vma;
- int ret;
- GEM_WARN_ON(!ww);
- if (flags & PIN_MAPPABLE &&
- (!view || view->type == I915_GTT_VIEW_NORMAL)) {
- /*
- * If the required space is larger than the available
- * aperture, we will not be able to find a slot for the
- * object and unbinding the object now will be in
- * vain. Worse, doing so may cause us to ping-pong
- * the object in and out of the Global GTT and
- * waste a lot of cycles under the mutex.
- */
- if (obj->base.size > ggtt->mappable_end)
- return ERR_PTR(-E2BIG);
- /*
- * If NONBLOCK is set the caller is optimistically
- * trying to cache the full object within the mappable
- * aperture, and *must* have a fallback in place for
- * situations where we cannot bind the object. We
- * can be a little more lax here and use the fallback
- * more often to avoid costly migrations of ourselves
- * and other objects within the aperture.
- *
- * Half-the-aperture is used as a simple heuristic.
- * More interesting would be to search for a free
- * block prior to making the commitment to unbind.
- * That caters for the self-harm case, and with a
- * little more heuristics (e.g. NOFAULT, NOEVICT)
- * we could try to minimise harm to others.
- */
- if (flags & PIN_NONBLOCK &&
- obj->base.size > ggtt->mappable_end / 2)
- return ERR_PTR(-ENOSPC);
- }
- new_vma:
- vma = i915_vma_instance(obj, &ggtt->vm, view);
- if (IS_ERR(vma))
- return vma;
- if (i915_vma_misplaced(vma, size, alignment, flags)) {
- if (flags & PIN_NONBLOCK) {
- if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
- return ERR_PTR(-ENOSPC);
- /*
- * If this misplaced vma is too big (i.e. at least
- * half the size of the aperture) or hasn't been pinned
- * mappable before, we ignore the misplacement when
- * PIN_NONBLOCK is set in order to avoid the ping-pong
- * issue described above. In other words, we try to
- * avoid the costly operation of unbinding this vma
- * from the GGTT and rebinding it back because there
- * may not be enough space for this vma in the aperture.
- */
- if (flags & PIN_MAPPABLE &&
- (vma->fence_size > ggtt->mappable_end / 2 ||
- !i915_vma_is_map_and_fenceable(vma)))
- return ERR_PTR(-ENOSPC);
- }
- if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
- discard_ggtt_vma(vma);
- goto new_vma;
- }
- ret = i915_vma_unbind(vma);
- if (ret)
- return ERR_PTR(ret);
- }
- ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
- if (ret)
- return ERR_PTR(ret);
- if (vma->fence && !i915_gem_object_is_tiled(obj)) {
- mutex_lock(&ggtt->vm.mutex);
- i915_vma_revoke_fence(vma);
- mutex_unlock(&ggtt->vm.mutex);
- }
- ret = i915_vma_wait_for_bind(vma);
- if (ret) {
- i915_vma_unpin(vma);
- return ERR_PTR(ret);
- }
- return vma;
- }
- struct i915_vma * __must_check
- i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_gtt_view *view,
- u64 size, u64 alignment, u64 flags)
- {
- struct i915_gem_ww_ctx ww;
- struct i915_vma *ret;
- int err;
- for_i915_gem_ww(&ww, err, true) {
- err = i915_gem_object_lock(obj, &ww);
- if (err)
- continue;
- ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
- alignment, flags);
- if (IS_ERR(ret))
- err = PTR_ERR(ret);
- }
- return err ? ERR_PTR(err) : ret;
- }
- int
- i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_madvise *args = data;
- struct drm_i915_gem_object *obj;
- int err;
- switch (args->madv) {
- case I915_MADV_DONTNEED:
- case I915_MADV_WILLNEED:
- break;
- default:
- return -EINVAL;
- }
- obj = i915_gem_object_lookup(file_priv, args->handle);
- if (!obj)
- return -ENOENT;
- err = i915_gem_object_lock_interruptible(obj, NULL);
- if (err)
- goto out;
- if (i915_gem_object_has_pages(obj) &&
- i915_gem_object_is_tiled(obj) &&
- i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
- if (obj->mm.madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
- i915_gem_object_clear_tiling_quirk(obj);
- i915_gem_object_make_shrinkable(obj);
- }
- if (args->madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
- i915_gem_object_make_unshrinkable(obj);
- i915_gem_object_set_tiling_quirk(obj);
- }
- }
- if (obj->mm.madv != __I915_MADV_PURGED) {
- obj->mm.madv = args->madv;
- if (obj->ops->adjust_lru)
- obj->ops->adjust_lru(obj);
- }
- if (i915_gem_object_has_pages(obj) ||
- i915_gem_object_has_self_managed_shrink_list(obj)) {
- unsigned long flags;
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (!list_empty(&obj->mm.link)) {
- struct list_head *list;
- if (obj->mm.madv != I915_MADV_WILLNEED)
- list = &i915->mm.purge_list;
- else
- list = &i915->mm.shrink_list;
- list_move_tail(&obj->mm.link, list);
- }
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
- /* if the object is no longer attached, discard its backing storage */
- if (obj->mm.madv == I915_MADV_DONTNEED &&
- !i915_gem_object_has_pages(obj))
- i915_gem_object_truncate(obj);
- args->retained = obj->mm.madv != __I915_MADV_PURGED;
- i915_gem_object_unlock(obj);
- out:
- i915_gem_object_put(obj);
- return err;
- }
- /*
- * A single pass should suffice to release all the freed objects (along most
- * call paths), but be a little more paranoid: freeing the objects does
- * take a small amount of time, during which the rcu callbacks could have added
- * new objects into the freed list, and armed the work again.
- */
- void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
- {
- while (atomic_read(&i915->mm.free_count)) {
- flush_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
- rcu_barrier();
- }
- }
- /*
- * Similar to objects above (see i915_gem_drain_freed_objects()), in general we
- * have workers that are armed by RCU and then rearm themselves in their
- * callbacks. To be paranoid, we need to drain the workqueue a second time after
- * waiting for the RCU grace period so that we catch work queued via RCU from
- * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
- * result, we assume that no more than 3 passes are required
- * to catch all _recursive_ RCU delayed work.
- */
- void i915_gem_drain_workqueue(struct drm_i915_private *i915)
- {
- int i;
- for (i = 0; i < 3; i++) {
- flush_workqueue(i915->wq);
- rcu_barrier();
- i915_gem_drain_freed_objects(i915);
- }
- drain_workqueue(i915->wq);
- }
- int i915_gem_init(struct drm_i915_private *dev_priv)
- {
- struct intel_gt *gt;
- unsigned int i;
- int ret;
- /* We need to fall back to 4K pages if the host doesn't support huge GTT. */
- if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
- RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
- ret = i915_gem_init_userptr(dev_priv);
- if (ret)
- return ret;
- intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
- intel_wopcm_init(&dev_priv->wopcm);
- ret = i915_init_ggtt(dev_priv);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_unlock;
- }
- /*
- * Despite its name, intel_init_clock_gating applies display
- * clock gating workarounds, GT mmio workarounds and the occasional
- * GT power context workaround. Worse, sometimes it includes a context
- * register workaround which we need to apply before we record the
- * default HW state for all contexts.
- *
- * FIXME: break up the workarounds and apply them at the right time!
- */
- intel_init_clock_gating(dev_priv);
- for_each_gt(gt, dev_priv, i) {
- ret = intel_gt_init(gt);
- if (ret)
- goto err_unlock;
- }
- return 0;
- /*
- * Unwinding is complicated by the fact that we want to handle -EIO to mean
- * disable GPU submission but keep KMS alive. We want to mark the
- * HW as irreversibly wedged, but keep enough state around that the
- * driver doesn't explode during runtime.
- */
- err_unlock:
- i915_gem_drain_workqueue(dev_priv);
- if (ret != -EIO) {
- for_each_gt(gt, dev_priv, i) {
- intel_gt_driver_remove(gt);
- intel_gt_driver_release(gt);
- intel_uc_cleanup_firmwares(&gt->uc);
- }
- }
- if (ret == -EIO) {
- /*
- * Allow engines or uC initialisation to fail by marking the GPU
- * as wedged. But we only want to do this when the GPU is angry,
- * for all other failures, such as an allocation failure, we bail.
- */
- for_each_gt(gt, dev_priv, i) {
- if (!intel_gt_is_wedged(gt)) {
- i915_probe_error(dev_priv,
- "Failed to initialize GPU, declaring it wedged!\n");
- intel_gt_set_wedged(gt);
- }
- }
- /* Minimal basic recovery for KMS */
- ret = i915_ggtt_enable_hw(dev_priv);
- i915_ggtt_resume(to_gt(dev_priv)->ggtt);
- intel_init_clock_gating(dev_priv);
- }
- i915_gem_drain_freed_objects(dev_priv);
- return ret;
- }
- void i915_gem_driver_register(struct drm_i915_private *i915)
- {
- i915_gem_driver_register__shrinker(i915);
- intel_engines_driver_register(i915);
- }
- void i915_gem_driver_unregister(struct drm_i915_private *i915)
- {
- i915_gem_driver_unregister__shrinker(i915);
- }
- void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
- {
- struct intel_gt *gt;
- unsigned int i;
- i915_gem_suspend_late(dev_priv);
- for_each_gt(gt, dev_priv, i)
- intel_gt_driver_remove(gt);
- dev_priv->uabi_engines = RB_ROOT;
- /* Flush any outstanding unpin_work. */
- i915_gem_drain_workqueue(dev_priv);
- i915_gem_drain_freed_objects(dev_priv);
- }
- void i915_gem_driver_release(struct drm_i915_private *dev_priv)
- {
- struct intel_gt *gt;
- unsigned int i;
- for_each_gt(gt, dev_priv, i) {
- intel_gt_driver_release(gt);
- intel_uc_cleanup_firmwares(&gt->uc);
- }
- /* Flush any outstanding work, including i915_gem_context.release_work. */
- i915_gem_drain_workqueue(dev_priv);
- drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
- }
- static void i915_gem_init__mm(struct drm_i915_private *i915)
- {
- spin_lock_init(&i915->mm.obj_lock);
- init_llist_head(&i915->mm.free_list);
- INIT_LIST_HEAD(&i915->mm.purge_list);
- INIT_LIST_HEAD(&i915->mm.shrink_list);
- i915_gem_init__objects(i915);
- }
- void i915_gem_init_early(struct drm_i915_private *dev_priv)
- {
- i915_gem_init__mm(dev_priv);
- i915_gem_init__contexts(dev_priv);
- spin_lock_init(&dev_priv->display.fb_tracking.lock);
- }
- void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
- {
- i915_gem_drain_freed_objects(dev_priv);
- GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
- GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
- drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
- }
- int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
- {
- struct drm_i915_file_private *file_priv;
- struct i915_drm_client *client;
- int ret = -ENOMEM;
- DRM_DEBUG("\n");
- file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
- if (!file_priv)
- goto err_alloc;
- client = i915_drm_client_add(&i915->clients);
- if (IS_ERR(client)) {
- ret = PTR_ERR(client);
- goto err_client;
- }
- file->driver_priv = file_priv;
- file_priv->dev_priv = i915;
- file_priv->file = file;
- file_priv->client = client;
- file_priv->bsd_engine = -1;
- file_priv->hang_timestamp = jiffies;
- ret = i915_gem_context_open(i915, file);
- if (ret)
- goto err_context;
- return 0;
- err_context:
- i915_drm_client_put(client);
- err_client:
- kfree(file_priv);
- err_alloc:
- return ret;
- }
- #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
- #include "selftests/mock_gem_device.c"
- #include "selftests/i915_gem.c"
- #endif