/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"
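/*
 * Helpers for reserving and releasing a drm_mm node in the mappable range
 * of the GGTT. The pread/pwrite slow paths below use such a node to map the
 * object through the aperture one page at a time.
 */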
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
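/*
 * Unbind every vma attached to @obj, taking and releasing a runtime-pm
 * wakeref around the walk. I915_GEM_OBJECT_UNBIND_TEST only probes for a
 * bound vma (returning -EBUSY), I915_GEM_OBJECT_UNBIND_BARRIER retries
 * after an RCU barrier if a vm was being released concurrently, and
 * I915_GEM_OBJECT_UNBIND_VM_TRYLOCK avoids blocking on the vm mutex.
 */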
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
	bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
	LIST_HEAD(still_in_list);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	int ret;

	assert_object_held(obj);

	if (list_empty(&obj->vma.list))
		return 0;

	/*
	 * As some machines use ACPI to handle runtime-resume callbacks, and
	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
	 * as they are required by the shrinker. Ergo, we wake the device up
	 * first just in case.
	 */
	wakeref = intel_runtime_pm_get(rpm);

try_again:
	ret = 0;
	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
			continue;

		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
			ret = -EBUSY;
			break;
		}

		/*
		 * Requiring the vm destructor to take the object lock
		 * before destroying a vma would help us eliminate the
		 * i915_vm_tryget() here, AND thus also the barrier stuff
		 * at the end. That's an easy fix, but sleeping locks in
		 * a kthread should generally be avoided.
		 */
		ret = -EAGAIN;
		if (!i915_vm_tryget(vma->vm))
			break;

		spin_unlock(&obj->vma.lock);

		/*
		 * Since i915_vma_parked() takes the object lock
		 * before vma destruction, it won't race us here,
		 * and destroy the vma from under us.
		 */
		ret = -EBUSY;
		if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
			assert_object_held(vma->obj);
			ret = i915_vma_unbind_async(vma, vm_trylock);
		}

		if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
				      !i915_vma_is_active(vma))) {
			if (vm_trylock) {
				if (mutex_trylock(&vma->vm->mutex)) {
					ret = __i915_vma_unbind(vma);
					mutex_unlock(&vma->vm->mutex);
				}
			} else {
				ret = i915_vma_unbind(vma);
			}
		}

		i915_vm_put(vma->vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice_init(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
		rcu_barrier(); /* flush the i915_vm_release() */
		goto try_again;
	}

	intel_runtime_pm_put(rpm, wakeref);

	return ret;
}
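/*
 * Per-page copy function for the shmem pread fastpath: kmap the page,
 * clflush the range first if the object is not cache coherent, then copy
 * it out to userspace.
 */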
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}
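/*
 * Copy out of an object through the GGTT aperture: map the page with an
 * atomic WC io-mapping first and, if the inatomic copy faults, retry with
 * a sleeping mapping and a plain copy_to_user(). Returns the number of
 * bytes left uncopied.
 */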
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}
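/*
 * Set up GGTT access for a pread/pwrite slow path: move the object to the
 * GTT domain and try to pin it into the mappable aperture. If that is not
 * possible (e.g. the object is tiled or the aperture is full), fall back to
 * reserving a single-page node that is remapped with insert_page() for each
 * page of the transfer. On success either a pinned vma is returned, or NULL
 * with @node allocated for the fallback.
 */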
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
					     struct drm_mm_node *node,
					     bool write)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	vma = ERR_PTR(-ENODEV);
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_ww;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_ww;

	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
						  PIN_MAPPABLE |
						  PIN_NONBLOCK /* NOWARN */ |
						  PIN_NOEVICT);
	if (vma == ERR_PTR(-EDEADLK)) {
		ret = -EDEADLK;
		goto err_ww;
	} else if (!IS_ERR(vma)) {
		node->start = i915_ggtt_offset(vma);
		node->flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
		if (ret)
			goto err_ww;
		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = NULL;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret) {
		if (drm_mm_node_allocated(node)) {
			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
			remove_mappable_node(ggtt, node);
		} else {
			i915_vma_unpin(vma);
		}
	}

err_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}

	i915_gem_ww_ctx_fini(&ww);
	return ret ? ERR_PTR(ret) : vma;
}

static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
				 struct drm_mm_node *node,
				 struct i915_vma *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	i915_gem_object_unpin_pages(obj);
	if (drm_mm_node_allocated(node)) {
		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
		remove_mappable_node(ggtt, node);
	} else {
		i915_vma_unpin(vma);
	}
}
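/*
 * Slow-path read through the GGTT aperture, one page at a time. When no
 * mappable vma could be pinned, each source page is temporarily bound into
 * the scratch node with insert_page() before being copied out.
 */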
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	vma = i915_gem_gtt_prepare(obj, &node, false);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PREAD is disallowed for all platforms after TGL-LP. This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pread)
		ret = obj->ops->pread(obj, args);
	if (ret != -ENODEV)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the
 * data directly from the user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret = 0;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fall back, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_gtt_prepare(obj, &node, true);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}
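/*
 * CPU write path over the object's shmem pages: copy page by page with
 * kmap, clflushing before the copy for partial-cacheline writes
 * (CLFLUSH_BEFORE) and after the copy when required (CLFLUSH_AFTER), then
 * flush the frontbuffer in case the object is a scanout.
 */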
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire range being written.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

/**
 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PWRITE is disallowed for all platforms after TGL-LP. This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    i915_gem_cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(obj, args);
	}

err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this cannot
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */
	list_for_each_entry_safe(obj, on,
				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	list_for_each_entry_safe(obj, on,
				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
		i915_gem_object_runtime_pm_release_mmap_offset(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */
		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}
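/*
 * Drop a misplaced vma from the object's vma tree so that the next
 * i915_vma_instance() lookup creates a fresh vma instead of reusing one
 * that is still pinned or active.
 */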
static void discard_ggtt_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	spin_lock(&obj->vma.lock);
	if (!RB_EMPTY_NODE(&vma->obj_node)) {
		rb_erase(&vma->obj_node, &obj->vma.tree);
		RB_CLEAR_NODE(&vma->obj_node);
	}
	spin_unlock(&obj->vma.lock);
}
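/*
 * Pin @obj into the global GTT under an existing ww acquire context.
 * PIN_MAPPABLE requests that cannot (or, with PIN_NONBLOCK, should not) fit
 * in the mappable aperture are rejected up front, misplaced vmas are
 * unbound or replaced, and any stale fence on an untiled object is revoked
 * before waiting for the bind to complete.
 */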
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_gtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	int ret;

	GEM_WARN_ON(!ww);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GTT_VIEW_NORMAL)) {
		/*
		 * If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/*
		 * If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

new_vma:
	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			/*
			 * If this misplaced vma is too big (i.e. at least
			 * half the size of the aperture) or hasn't been pinned
			 * mappable before, we ignore the misplacement when
			 * PIN_NONBLOCK is set in order to avoid the ping-pong
			 * issue described above. In other words, we try to
			 * avoid the costly operation of unbinding this vma
			 * from the GGTT and rebinding it back because there
			 * may not be enough space for this vma in the aperture.
			 */
			if (flags & PIN_MAPPABLE &&
			    (vma->fence_size > ggtt->mappable_end / 2 ||
			     !i915_vma_is_map_and_fenceable(vma)))
				return ERR_PTR(-ENOSPC);
		}

		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
			discard_ggtt_vma(vma);
			goto new_vma;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&ggtt->vm.mutex);
		i915_vma_revoke_fence(vma);
		mutex_unlock(&ggtt->vm.mutex);
	}

	ret = i915_vma_wait_for_bind(vma);
	if (ret) {
		i915_vma_unpin(vma);
		return ERR_PTR(ret);
	}

	return vma;
}
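/*
 * Convenience wrapper around i915_gem_object_ggtt_pin_ww() that supplies
 * its own ww acquire context and handles the lock/backoff loop via
 * for_i915_gem_ww().
 */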
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_gtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *ret;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
						  alignment, flags);
		if (IS_ERR(ret))
			err = PTR_ERR(ret);
	}

	return err ? ERR_PTR(err) : ret;
}
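/*
 * I915_GEM_MADVISE (typically reached via DRM_IOCTL_I915_GEM_MADVISE):
 * userspace hint that the backing storage of a buffer may be discarded
 * under memory pressure (DONTNEED) or must be kept (WILLNEED). The object
 * is moved between the shrinker's shrink/purge lists accordingly, and an
 * unbacked DONTNEED object is truncated immediately.
 */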
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED) {
		obj->mm.madv = args->madv;
		if (obj->ops->adjust_lru)
			obj->ops->adjust_lru(obj);
	}

	if (i915_gem_object_has_pages(obj) ||
	    i915_gem_object_has_self_managed_shrink_list(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		if (!list_empty(&obj->mm.link)) {
			struct list_head *list;

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);
		}
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_unlock(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/*
 * A single pass should suffice to release all the freed objects (along most
 * call paths), but be a little more paranoid in that freeing the objects does
 * take a small amount of time, during which the rcu callbacks could have added
 * new objects into the freed list, and armed the work again.
 */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		flush_delayed_work(&i915->bdev.wq);
		rcu_barrier();
	}
}

/*
 * Similar to objects above (see i915_gem_drain_freed_objects()), in general we
 * have workers that are armed by RCU and then rearm themselves in their
 * callbacks. To be paranoid, we need to drain the workqueue a second time after
 * waiting for the RCU grace period so that we catch work queued via RCU from
 * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
 * result, we assume that no more than 3 passes are required to catch all
 * _recursive_ RCU delayed work.
 */
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < 3; i++) {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	}

	drain_workqueue(i915->wq);
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;
	int ret;

	/* We need to fall back to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
	intel_wopcm_init(&dev_priv->wopcm);

	ret = i915_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	/*
	 * Despite its name, intel_init_clock_gating applies both display
	 * clock gating workarounds, GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init(gt);
		if (ret)
			goto err_unlock;
	}

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_unlock:
	i915_gem_drain_workqueue(dev_priv);

	if (ret != -EIO) {
		for_each_gt(gt, dev_priv, i) {
			intel_gt_driver_remove(gt);
			intel_gt_driver_release(gt);
			intel_uc_cleanup_firmwares(&gt->uc);
		}
	}

	if (ret == -EIO) {
		/*
		 * Allow engines or uC initialisation to fail by marking the GPU
		 * as wedged. But we only want to do this when the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		for_each_gt(gt, dev_priv, i) {
			if (!intel_gt_is_wedged(gt)) {
				i915_probe_error(dev_priv,
						 "Failed to initialize GPU, declaring it wedged!\n");
				intel_gt_set_wedged(gt);
			}
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_ggtt_resume(to_gt(dev_priv)->ggtt);
		intel_init_clock_gating(dev_priv);
	}

	i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
	i915_gem_driver_register__shrinker(i915);

	intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_suspend_late(dev_priv);
	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_remove(gt);
	dev_priv->uabi_engines = RB_ROOT;

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i) {
		intel_gt_driver_release(gt);
		intel_uc_cleanup_firmwares(&gt->uc);
	}

	/* Flush any outstanding work, including i915_gem_context.release_work. */
	i915_gem_drain_workqueue(dev_priv);

	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	i915_gem_init__mm(dev_priv);
	i915_gem_init__contexts(dev_priv);

	spin_lock_init(&dev_priv->display.fb_tracking.lock);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	struct i915_drm_client *client;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		goto err_alloc;

	client = i915_drm_client_add(&i915->clients);
	if (IS_ERR(client)) {
		ret = PTR_ERR(client);
		goto err_client;
	}

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;
	file_priv->client = client;

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		goto err_context;

	return 0;

err_context:
	i915_drm_client_put(client);
err_client:
	kfree(file_priv);
err_alloc:
	return ret;
}
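/*
 * The selftests are compiled directly into this translation unit so that
 * they can exercise the static helpers above; they are only built when
 * CONFIG_DRM_I915_SELFTEST is enabled.
 */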
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif