/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>

#include "msm_drv.h"

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN      0x10000000	/* try to use stolen/splash memory */
#define MSM_BO_KEEPATTRS   0x20000000	/* keep h/w bus attributes */
#define MSM_BO_SKIPSYNC    0x40000000	/* skip dmabuf cpu sync */
#define MSM_BO_EXTBUF      0x80000000	/* indicate BO is an import buffer */
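
/*
 * Illustrative note (assumption, not spelled out in this header): these
 * internal flags occupy the top bits of msm_gem_object::flags, above the
 * UAPI MSM_BO_* flags, so kernel-internal state can share the same field
 * as the user-visible flags; e.g. an allocation path might branch on
 *
 *	if (msm_obj->flags & MSM_BO_STOLEN)
 *
 * to treat a stolen/splash-backed object differently from a normally
 * allocated one.
 */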

struct msm_gem_object;

struct msm_gem_aspace_ops {
	int (*map)(struct msm_gem_address_space *space, struct msm_gem_vma *vma,
		struct sg_table *sgt, int npages, unsigned int flags);

	void (*unmap)(struct msm_gem_address_space *space,
		struct msm_gem_vma *vma, struct sg_table *sgt,
		unsigned int flags);

	void (*destroy)(struct msm_gem_address_space *space);

	void (*add_to_active)(struct msm_gem_address_space *space,
		struct msm_gem_object *obj);

	void (*remove_from_active)(struct msm_gem_address_space *space,
		struct msm_gem_object *obj);

	int (*register_cb)(struct msm_gem_address_space *space,
		void (*cb)(void *cb, bool data),
		void *cb_data);

	int (*unregister_cb)(struct msm_gem_address_space *space,
		void (*cb)(void *cb, bool data),
		void *cb_data);
};

struct aspace_client {
	void (*cb)(void *cb, bool data);
	void *cb_data;
	struct list_head list;
};
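
/*
 * Illustrative sketch (assumption, not defined in this header): a client
 * interested in address-space events provides a callback matching the
 * shape stored in aspace_client and registers it through the aspace ops,
 * roughly (my_aspace_cb and my_client are hypothetical names):
 *
 *	static void my_aspace_cb(void *cb_data, bool data)
 *	{
 *		struct my_client *client = cb_data;
 *
 *		...react to the address-space state change...
 *	}
 *
 *	if (aspace->ops && aspace->ops->register_cb)
 *		aspace->ops->register_cb(aspace, my_aspace_cb, client);
 */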

struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock;	/* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;
	bool domain_attached;
	const struct msm_gem_aspace_ops *ops;
	struct drm_device *dev;
	/* list of mapped objects */
	struct list_head active_list;
	/* list of clients */
	struct list_head clients;
	struct mutex list_lock;	/* Protects active_list & clients */
};

struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;	/* node in msm_gem_object::vmas */
	bool mapped;
	int inuse;
};

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;
	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list.. well, at least
	 *     for now we don't have (I don't think) hw sync between 2d
	 *     and 3d on devices which have both, meaning we need to
	 *     block on submit if a bo is already on the other ring
	 *
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;	/* non-null if active */

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas;	/* list of msm_gem_vma */

	struct llist_node freed;

	/* normally (resv == &_resv) except for imported bo's */
	struct dma_resv *resv;
	struct dma_resv _resv;

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;
	struct mutex lock;	/* Protects resources associated with bo */

	struct list_head iova_list;
	struct msm_gem_address_space *aspace;
	bool in_active_list;
	char name[32];	/* Identifier to print for the debugfs files */

	/* Indicates whether object needs to request for
	 * new pagetables due to cb switch
	 */
	bool obj_dirty;

	/* iova address and aligned offset */
	uint64_t iova;
	uint32_t offset;
};
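
/*
 * Illustrative sketch (assumption, not defined in this header): each
 * per-address-space mapping of an object lives on msm_obj->vmas, so a
 * lookup helper is essentially a list walk done with msm_obj->lock held,
 * roughly:
 *
 *	static struct msm_gem_vma *lookup_vma(struct msm_gem_object *msm_obj,
 *			struct msm_gem_address_space *aspace)
 *	{
 *		struct msm_gem_vma *vma;
 *
 *		WARN_ON(!mutex_is_locked(&msm_obj->lock));
 *		list_for_each_entry(vma, &msm_obj->vmas, list)
 *			if (vma->aspace == aspace)
 *				return vma;
 *		return NULL;
 *	}
 */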

#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

static inline bool is_active(struct msm_gem_object *msm_obj)
{
	return msm_obj->gpu != NULL;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}
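
/*
 * Illustrative sketch (assumption, not defined in this header): vmap_count
 * tracks kernel-mapping users, so a put path would drop the count under
 * msm_obj->lock and leave the actual vunmap to a later pass that checks
 * is_vunmapable() (e.g. the shrinker), roughly:
 *
 *	mutex_lock(&msm_obj->lock);
 *	WARN_ON(msm_obj->vmap_count < 1);
 *	msm_obj->vmap_count--;
 *	mutex_unlock(&msm_obj->lock);
 */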

/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab objB->lock to purge it.  Lockdep just sees these as a single
 * class of lock, so we use subclasses to teach it the difference.
 *
 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
 * OBJ_LOCK_SHRINKER is used by shrinker.
 *
 * It is *essential* that we never go down paths that could trigger the
 * shrinker for a purgeable object.  This is ensured by checking that
 * msm_obj->madv == MSM_MADV_WILLNEED.
 */
enum msm_gem_lock {
	OBJ_LOCK_NORMAL,
	OBJ_LOCK_SHRINKER,
};

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
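
/*
 * Illustrative sketch (assumption, not defined in this header): callers on
 * the shrinker path pass OBJ_LOCK_SHRINKER so these helpers can take the
 * object lock with a distinct lockdep subclass, roughly:
 *
 *	inside msm_gem_purge()/msm_gem_vunmap():
 *		mutex_lock_nested(&msm_obj->lock, subclass);
 *
 *	shrinker scan callback (with struct_mutex held):
 *		if (is_purgeable(msm_obj))
 *			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 */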

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).  This only
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct list_head node;	/* node in ring submit list */
	struct list_head bo_list;
	struct ww_acquire_ctx ticket;
	uint32_t seqno;		/* Sequence number of the submit on the ring */
	struct dma_fence *fence;
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;	/* submitting process */
	bool valid;		/* true if no cmdstream patching needed */
	bool in_rb;		/* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	u32 ident;		/* An "identifier" for the submit for logging */
	struct {
		uint32_t type;
		uint32_t size;	/* in dwords */
		uint64_t iova;
		uint32_t idx;	/* cmdstream buffer idx in bos[] */
	} *cmd;			/* array of size nr_cmds */
	struct {
		uint32_t flags;
		struct msm_gem_object *obj;
		uint64_t iova;
	} bos[0];
};
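
/*
 * Illustrative sketch (assumption, not defined in this header): since bos[]
 * is a variable-length trailing array and cmd points at a second
 * variable-length region, creating a submit allocates both in one chunk and
 * points cmd just past the bos; nr_bos/nr_cmds then get filled in as the
 * ioctl copies each entry in.  Roughly:
 *
 *	sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
 *			(nr_cmds * sizeof(*submit->cmd));
 *	submit = kmalloc(sz, GFP_KERNEL);
 *	if (!submit)
 *		return NULL;
 *	submit->cmd = (void *)&submit->bos[nr_bos];
 *	submit->nr_bos = 0;
 *	submit->nr_cmds = 0;
 */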

/**
 * msm_gem_put_buffer - put gem buffer
 * @gem: pointer to gem buffer object
 */
void msm_gem_put_buffer(struct drm_gem_object *gem);

/**
 * msm_gem_get_buffer - get a gem buffer
 * @gem: drm gem object
 * @dev: pointer to drm device
 * @fb: frame buffer object
 * @align_size: size to align the buffer to
 */
int msm_gem_get_buffer(struct drm_gem_object *gem,
		struct drm_device *dev, struct drm_framebuffer *fb,
		uint32_t align_size);
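
/*
 * Illustrative sketch (assumption, not defined in this header): the two
 * helpers above are expected to be used as a get/put pair around scanout
 * use of a framebuffer-backed gem object; the SZ_4K alignment below is
 * purely illustrative:
 *
 *	ret = msm_gem_get_buffer(gem, dev, fb, SZ_4K);
 *	if (ret)
 *		return ret;
 *	...use the buffer...
 *	msm_gem_put_buffer(gem);
 */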

#endif /* __MSM_GEM_H__ */