i915_vm_bind.h 8.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291
  1. /* SPDX-License-Identifier: MIT */
  2. /*
  3. * Copyright © 2022 Intel Corporation
  4. */
  5. /**
  6. * DOC: I915_PARAM_VM_BIND_VERSION
  7. *
  8. * VM_BIND feature version supported.
  9. * See typedef drm_i915_getparam_t param.
  10. *
  11. * Specifies the VM_BIND feature version supported.
  12. * The following versions of VM_BIND have been defined:
  13. *
  14. * 0: No VM_BIND support.
  15. *
  16. * 1: In VM_UNBIND calls, the UMD must specify the exact mappings created
  17. * previously with VM_BIND, the ioctl will not support unbinding multiple
  18. * mappings or splitting them. Similarly, VM_BIND calls will not replace
  19. * any existing mappings.
  20. *
  21. * 2: The restrictions on unbinding partial or multiple mappings is
  22. * lifted, Similarly, binding will replace any mappings in the given range.
  23. *
  24. * See struct drm_i915_gem_vm_bind and struct drm_i915_gem_vm_unbind.
  25. */
  26. #define I915_PARAM_VM_BIND_VERSION 57
  27. /**
  28. * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
  29. *
  30. * Flag to opt-in for VM_BIND mode of binding during VM creation.
  31. * See struct drm_i915_gem_vm_control flags.
  32. *
  33. * The older execbuf2 ioctl will not support VM_BIND mode of operation.
  34. * For VM_BIND mode, we have new execbuf3 ioctl which will not accept any
  35. * execlist (See struct drm_i915_gem_execbuffer3 for more details).
  36. */
  37. #define I915_VM_CREATE_FLAGS_USE_VM_BIND (1 << 0)
  38. /* VM_BIND related ioctls */
  39. #define DRM_I915_GEM_VM_BIND 0x3d
  40. #define DRM_I915_GEM_VM_UNBIND 0x3e
  41. #define DRM_I915_GEM_EXECBUFFER3 0x3f
  42. #define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
  43. #define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
  44. #define DRM_IOCTL_I915_GEM_EXECBUFFER3 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
  45. /**
  46. * struct drm_i915_gem_timeline_fence - An input or output timeline fence.
  47. *
  48. * The operation will wait for input fence to signal.
  49. *
  50. * The returned output fence will be signaled after the completion of the
  51. * operation.
  52. */
  53. struct drm_i915_gem_timeline_fence {
  54. /** @handle: User's handle for a drm_syncobj to wait on or signal. */
  55. __u32 handle;
  56. /**
  57. * @flags: Supported flags are:
  58. *
  59. * I915_TIMELINE_FENCE_WAIT:
  60. * Wait for the input fence before the operation.
  61. *
  62. * I915_TIMELINE_FENCE_SIGNAL:
  63. * Return operation completion fence as output.
  64. */
  65. __u32 flags;
  66. #define I915_TIMELINE_FENCE_WAIT (1 << 0)
  67. #define I915_TIMELINE_FENCE_SIGNAL (1 << 1)
  68. #define __I915_TIMELINE_FENCE_UNKNOWN_FLAGS (-(I915_TIMELINE_FENCE_SIGNAL << 1))
  69. /**
  70. * @value: A point in the timeline.
  71. * Value must be 0 for a binary drm_syncobj. A Value of 0 for a
  72. * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
  73. * binary one.
  74. */
  75. __u64 value;
  76. };
  77. /**
  78. * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
  79. *
  80. * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
  81. * virtual address (VA) range to the section of an object that should be bound
  82. * in the device page table of the specified address space (VM).
  83. * The VA range specified must be unique (ie., not currently bound) and can
  84. * be mapped to whole object or a section of the object (partial binding).
  85. * Multiple VA mappings can be created to the same section of the object
  86. * (aliasing).
  87. *
  88. * The @start, @offset and @length must be 4K page aligned. However the DG2
  89. * and XEHPSDV has 64K page size for device local memory and has compact page
  90. * table. On those platforms, for binding device local-memory objects, the
  91. * @start, @offset and @length must be 64K aligned. Also, UMDs should not mix
  92. * the local memory 64K page and the system memory 4K page bindings in the same
  93. * 2M range.
  94. *
  95. * Error code -EINVAL will be returned if @start, @offset and @length are not
  96. * properly aligned. In version 1 (See I915_PARAM_VM_BIND_VERSION), error code
  97. * -ENOSPC will be returned if the VA range specified can't be reserved.
  98. *
  99. * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
  100. * are not ordered. Furthermore, parts of the VM_BIND operation can be done
  101. * asynchronously, if valid @fence is specified.
  102. */
  103. struct drm_i915_gem_vm_bind {
  104. /** @vm_id: VM (address space) id to bind */
  105. __u32 vm_id;
  106. /** @handle: Object handle */
  107. __u32 handle;
  108. /** @start: Virtual Address start to bind */
  109. __u64 start;
  110. /** @offset: Offset in object to bind */
  111. __u64 offset;
  112. /** @length: Length of mapping to bind */
  113. __u64 length;
  114. /**
  115. * @flags: Supported flags are:
  116. *
  117. * I915_GEM_VM_BIND_CAPTURE:
  118. * Capture this mapping in the dump upon GPU error.
  119. *
  120. * Note that @fence carries its own flags.
  121. */
  122. __u64 flags;
  123. #define I915_GEM_VM_BIND_CAPTURE (1 << 0)
  124. /**
  125. * @fence: Timeline fence for bind completion signaling.
  126. *
  127. * Timeline fence is of format struct drm_i915_gem_timeline_fence.
  128. *
  129. * It is an out fence, hence using I915_TIMELINE_FENCE_WAIT flag
  130. * is invalid, and an error will be returned.
  131. *
  132. * If I915_TIMELINE_FENCE_SIGNAL flag is not set, then out fence
  133. * is not requested and binding is completed synchronously.
  134. */
  135. struct drm_i915_gem_timeline_fence fence;
  136. /**
  137. * @extensions: Zero-terminated chain of extensions.
  138. *
  139. * For future extensions. See struct i915_user_extension.
  140. */
  141. __u64 extensions;
  142. };
  143. /**
  144. * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
  145. *
  146. * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
  147. * address (VA) range that should be unbound from the device page table of the
  148. * specified address space (VM). VM_UNBIND will force unbind the specified
  149. * range from device page table without waiting for any GPU job to complete.
  150. * It is UMDs responsibility to ensure the mapping is no longer in use before
  151. * calling VM_UNBIND.
  152. *
  153. * If the specified mapping is not found, the ioctl will simply return without
  154. * any error.
  155. *
  156. * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
  157. * are not ordered. Furthermore, parts of the VM_UNBIND operation can be done
  158. * asynchronously, if valid @fence is specified.
  159. */
  160. struct drm_i915_gem_vm_unbind {
  161. /** @vm_id: VM (address space) id to bind */
  162. __u32 vm_id;
  163. /** @rsvd: Reserved, MBZ */
  164. __u32 rsvd;
  165. /** @start: Virtual Address start to unbind */
  166. __u64 start;
  167. /** @length: Length of mapping to unbind */
  168. __u64 length;
  169. /**
  170. * @flags: Currently reserved, MBZ.
  171. *
  172. * Note that @fence carries its own flags.
  173. */
  174. __u64 flags;
  175. /**
  176. * @fence: Timeline fence for unbind completion signaling.
  177. *
  178. * Timeline fence is of format struct drm_i915_gem_timeline_fence.
  179. *
  180. * It is an out fence, hence using I915_TIMELINE_FENCE_WAIT flag
  181. * is invalid, and an error will be returned.
  182. *
  183. * If I915_TIMELINE_FENCE_SIGNAL flag is not set, then out fence
  184. * is not requested and unbinding is completed synchronously.
  185. */
  186. struct drm_i915_gem_timeline_fence fence;
  187. /**
  188. * @extensions: Zero-terminated chain of extensions.
  189. *
  190. * For future extensions. See struct i915_user_extension.
  191. */
  192. __u64 extensions;
  193. };
  194. /**
  195. * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
  196. * ioctl.
  197. *
  198. * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and VM_BIND mode
  199. * only works with this ioctl for submission.
  200. * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
  201. */
  202. struct drm_i915_gem_execbuffer3 {
  203. /**
  204. * @ctx_id: Context id
  205. *
  206. * Only contexts with user engine map are allowed.
  207. */
  208. __u32 ctx_id;
  209. /**
  210. * @engine_idx: Engine index
  211. *
  212. * An index in the user engine map of the context specified by @ctx_id.
  213. */
  214. __u32 engine_idx;
  215. /**
  216. * @batch_address: Batch gpu virtual address/es.
  217. *
  218. * For normal submission, it is the gpu virtual address of the batch
  219. * buffer. For parallel submission, it is a pointer to an array of
  220. * batch buffer gpu virtual addresses with array size equal to the
  221. * number of (parallel) engines involved in that submission (See
  222. * struct i915_context_engines_parallel_submit).
  223. */
  224. __u64 batch_address;
  225. /** @flags: Currently reserved, MBZ */
  226. __u64 flags;
  227. /** @rsvd1: Reserved, MBZ */
  228. __u32 rsvd1;
  229. /** @fence_count: Number of fences in @timeline_fences array. */
  230. __u32 fence_count;
  231. /**
  232. * @timeline_fences: Pointer to an array of timeline fences.
  233. *
  234. * Timeline fences are of format struct drm_i915_gem_timeline_fence.
  235. */
  236. __u64 timeline_fences;
  237. /** @rsvd2: Reserved, MBZ */
  238. __u64 rsvd2;
  239. /**
  240. * @extensions: Zero-terminated chain of extensions.
  241. *
  242. * For future extensions. See struct i915_user_extension.
  243. */
  244. __u64 extensions;
  245. };
  246. /**
  247. * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
  248. * private to the specified VM.
  249. *
  250. * See struct drm_i915_gem_create_ext.
  251. */
  252. struct drm_i915_gem_create_ext_vm_private {
  253. #define I915_GEM_CREATE_EXT_VM_PRIVATE 2
  254. /** @base: Extension link. See struct i915_user_extension. */
  255. struct i915_user_extension base;
  256. /** @vm_id: Id of the VM to which the object is private */
  257. __u32 vm_id;
  258. };