ttm_bo_driver.h

/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/dma-resv.h>

#include <drm/ttm/ttm_device.h>

#include "ttm_bo_api.h"
#include "ttm_kmap_iter.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"

/*
 * ttm_bo.c
 */
/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_resource.
 * @ctx: Operation context; controls interruptible sleeps and whether to
 * wait for the GPU.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects. This function may sleep while waiting for space to
 * become available.
 * Returns:
 * -EBUSY: No space available (only if @ctx->no_wait_gpu is set).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_resource **mem,
                     struct ttm_operation_ctx *ctx);
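/*
 * A minimal usage sketch (hypothetical driver code, not taken from any
 * in-tree driver): ask for space according to a placement with an
 * interruptible, GPU-waiting operation context. Error handling is
 * abbreviated.
 *
 *      struct ttm_operation_ctx ctx = {
 *              .interruptible = true,
 *              .no_wait_gpu = false,
 *      };
 *      struct ttm_resource *new_mem;
 *      int ret;
 *
 *      ret = ttm_bo_mem_space(bo, &placement, &new_mem, &ctx);
 *      if (ret)
 *              return ret;     (-ENOMEM, -EBUSY or -ERESTARTSYS)
 */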
/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: @bo is already reserved using @ticket. This error code is only
 * returned if a non-NULL @ticket is used.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
                                 bool interruptible, bool no_wait,
                                 struct ww_acquire_ctx *ticket)
{
        int ret;

        if (no_wait) {
                bool success;

                if (WARN_ON(ticket))
                        return -EBUSY;

                success = dma_resv_trylock(bo->base.resv);
                return success ? 0 : -EBUSY;
        }

        if (interruptible)
                ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
        else
                ret = dma_resv_lock(bo->base.resv, ticket);
        if (ret == -EINTR)
                return -ERESTARTSYS;
        return ret;
}
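/*
 * A minimal usage sketch (hypothetical driver code): take and drop a
 * single-buffer reservation without a ww_acquire_ctx ticket.
 *
 *      ret = ttm_bo_reserve(bo, true, false, NULL);
 *      if (ret)
 *              return ret;
 *      ... validate, populate or map the buffer here ...
 *      ttm_bo_unreserve(bo);
 */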
/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
                                          bool interruptible,
                                          struct ww_acquire_ctx *ticket)
{
        if (interruptible) {
                int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
                                                           ticket);

                if (ret == -EINTR)
                        ret = -ERESTARTSYS;
                return ret;
        }
        dma_resv_lock_slow(bo->base.resv, ticket);
        return 0;
}
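/*
 * A sketch of the deadlock-backoff pattern these two helpers support
 * (hypothetical driver code; bo_a and bo_b are illustrative). On -EDEADLK,
 * all reservations already held are dropped and the contended buffer is
 * re-acquired via the slowpath before retrying the rest.
 *
 *      struct ww_acquire_ctx ticket;
 *
 *      ww_acquire_init(&ticket, &reservation_ww_class);
 *      ret = ttm_bo_reserve(bo_a, true, false, &ticket);
 *      if (ret)
 *              goto err;
 *      ret = ttm_bo_reserve(bo_b, true, false, &ticket);
 *      if (ret == -EDEADLK) {
 *              ttm_bo_unreserve(bo_a);
 *              ret = ttm_bo_reserve_slowpath(bo_b, true, &ticket);
 *              ... retry reserving the remaining buffers ...
 *      }
 *      ww_acquire_done(&ticket);
 */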
static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
        spin_lock(&bo->bdev->lru_lock);
        ttm_bo_move_to_lru_tail(bo);
        spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
                                     struct ttm_resource *new_mem)
{
        WARN_ON(bo->resource);
        bo->resource = new_mem;
}
/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
                                    struct ttm_resource *new_mem)
{
        ttm_resource_free(bo, &bo->resource);
        ttm_bo_assign_mem(bo, new_mem);
}
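/*
 * A usage sketch (hypothetical driver code): a driver's move callback can
 * use ttm_bo_move_null() when no data copy is actually required, for
 * example when the buffer has no current backing resource. The callback
 * signature below is assumed from struct ttm_device_funcs.
 *
 *      static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *                               struct ttm_operation_ctx *ctx,
 *                               struct ttm_resource *new_mem,
 *                               struct ttm_place *hop)
 *      {
 *              if (!bo->resource) {
 *                      ttm_bo_move_null(bo, new_mem);
 *                      return 0;
 *              }
 *              ... perform an actual move otherwise ...
 *      }
 */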
/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        ttm_bo_move_to_lru_tail_unlocked(bo);
        dma_resv_unlock(bo->base.resv);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_device *bdev,
                       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
                     struct ttm_resource *mem);
/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context; controls interruptible sleeps and whether to
 * wait for the GPU.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * If successful, the function frees any old aperture space and assigns
 * @new_mem to the buffer object. If unsuccessful, the old data remains
 * untouched, and it's up to the caller to free the memory space indicated
 * by @new_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem);
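/*
 * A usage sketch (hypothetical driver code): when no blit engine is
 * available for a particular transfer, a driver's move callback can fall
 * back to the CPU copy path. mydrv_can_blit() is an illustrative helper.
 *
 *      if (!mydrv_can_blit(bo, new_mem))
 *              return ttm_bo_move_memcpy(bo, ctx, new_mem);
 */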
/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence, bool evict,
                              bool pipeline,
                              struct ttm_resource *new_mem);
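/*
 * A usage sketch (hypothetical driver code): schedule a hardware copy,
 * obtain the fence for it, and let TTM clean up the old placement once the
 * fence signals. mydrv_copy_buffer() is an illustrative helper returning
 * the dma_fence of the scheduled copy.
 *
 *      struct dma_fence *fence;
 *
 *      fence = mydrv_copy_buffer(bo, new_mem);
 *      if (IS_ERR(fence))
 *              return PTR_ERR(fence);
 *      ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *      dma_fence_put(fence);
 *      return ret;
 */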
/**
 * ttm_bo_move_sync_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
                              struct ttm_resource *new_mem);

/**
 * ttm_bo_pipeline_gutting
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Pipelined gutting a BO of its backing store.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @bo and @res.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
                     pgprot_t tmp);
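/*
 * A usage sketch (hypothetical driver code): derive the page protection for
 * a kernel vmap of a system-memory BO from its caching mode. The pages
 * array and page count are assumed to come from the BO's populated ttm_tt.
 *
 *      pgprot_t prot = ttm_io_prot(bo, bo->resource, PAGE_KERNEL);
 *      void *vaddr = vmap(pages, num_pages, VM_MAP, prot);
 */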
/**
 * ttm_bo_tt_bind
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @mem: The memory resource to bind the object's ttm_tt to.
 *
 * Bind the object tt to a memory resource.
 */
int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);

/**
 * ttm_bo_tt_destroy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Destroy and free the ttm_tt backing @bo, if any.
 */
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
void ttm_move_memcpy(bool clear,
                     u32 num_pages,
                     struct ttm_kmap_iter *dst_iter,
                     struct ttm_kmap_iter *src_iter);

struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
                         struct io_mapping *iomap,
                         struct sg_table *st,
                         resource_size_t start);
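/*
 * A sketch of a helper-level CPU copy (hypothetical driver code): build a
 * source iterator over a ttm_tt and a destination iterator over an
 * io_mapping'ed aperture, then let ttm_move_memcpy() walk both. The
 * iter_tt/iter_io variables, the sg_table and the aperture mapping are
 * assumed to be set up by the caller.
 *
 *      struct ttm_kmap_iter *src, *dst;
 *
 *      src = ttm_kmap_iter_tt_init(&iter_tt, bo->ttm);
 *      dst = ttm_kmap_iter_iomap_init(&iter_io, iomap, sgt, res_start);
 *      ttm_move_memcpy(false, PFN_UP(bo->base.size), dst, src);
 */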
#endif