kgsl_sharedmem.h 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #ifndef __KGSL_SHAREDMEM_H
  7. #define __KGSL_SHAREDMEM_H
  8. #include <linux/bitfield.h>
  9. #include <linux/dma-mapping.h>
  10. #include <linux/scatterlist.h>
  11. #include <linux/slab.h>
  12. #include "kgsl.h"
  13. #include "kgsl_mmu.h"
  14. struct kgsl_device;
  15. struct kgsl_process_private;
  16. extern bool kgsl_sharedmem_noretry_flag;
  17. #define KGSL_CACHE_OP_INV 0x01
  18. #define KGSL_CACHE_OP_FLUSH 0x02
  19. #define KGSL_CACHE_OP_CLEAN 0x03
/**
 * kgsl_sharedmem_free - Free the backing memory of a GPU memory object
 * @memdesc: Pointer to a GPU memory object
 */
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

/**
 * kgsl_sharedmem_readl - read a 32 bit value from a shared memory object
 * @memdesc: Pointer to a GPU memory object
 * @dst: Pointer to store the value that is read
 * @offsetbytes: Offset inside of @memdesc to read from
 *
 * Return: 0 on success or negative on failure.
 */
int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
		uint32_t *dst,
		uint64_t offsetbytes);

/**
 * kgsl_sharedmem_writel - write a 32 bit value to a shared memory object
 * @memdesc: Pointer to a GPU memory object
 * @offsetbytes: Offset inside of @memdesc to write to
 * @src: Value to write
 *
 * Write @src to @offsetbytes from the start of @memdesc
 */
void kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
		uint64_t offsetbytes,
		uint32_t src);

/**
 * kgsl_sharedmem_readq - read a 64 bit value from a shared memory object
 * @memdesc: Pointer to a GPU memory object
 * @dst: Pointer to store the value that is read
 * @offsetbytes: Offset inside of @memdesc to read from
 *
 * Return: 0 on success or negative on failure.
 */
int kgsl_sharedmem_readq(const struct kgsl_memdesc *memdesc,
		uint64_t *dst,
		uint64_t offsetbytes);

/**
 * kgsl_sharedmem_writeq - write a 64 bit value to a shared memory object
 * @memdesc: Pointer to a GPU memory object
 * @offsetbytes: Offset inside of @memdesc to write to
 * @src: Value to write
 *
 * Write @src to @offsetbytes from the start of @memdesc
 */
void kgsl_sharedmem_writeq(const struct kgsl_memdesc *memdesc,
		uint64_t offsetbytes,
		uint64_t src);

/**
 * kgsl_cache_range_op - Perform a cache maintenance operation on a range
 * @memdesc: Pointer to a GPU memory object
 * @offset: Offset inside of @memdesc to start the operation at
 * @size: Number of bytes to operate on
 * @op: One of KGSL_CACHE_OP_INV, KGSL_CACHE_OP_FLUSH or KGSL_CACHE_OP_CLEAN
 *
 * Return: 0 on success or negative on failure.
 */
int kgsl_cache_range_op(struct kgsl_memdesc *memdesc,
		uint64_t offset, uint64_t size,
		unsigned int op);
/**
 * kgsl_memdesc_init - Initialize a memory descriptor
 * @device: A GPU device handle
 * @memdesc: Memory descriptor to initialize
 * @flags: Control flags to set on the descriptor
 */
void kgsl_memdesc_init(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, uint64_t flags);

/**
 * kgsl_process_init_sysfs - Set up sysfs nodes for a KGSL process
 * @device: A GPU device handle
 * @private: Pointer to the process private struct the nodes describe
 */
void kgsl_process_init_sysfs(struct kgsl_device *device,
		struct kgsl_process_private *private);

/* Register the driver-global sharedmem sysfs attributes */
int kgsl_sharedmem_init_sysfs(void);

/**
 * kgsl_get_memory_usage - Render memory flags as a human readable string
 * @str: Destination buffer
 * @len: Size of @str in bytes
 * @memflags: Memory flags to describe
 */
void kgsl_get_memory_usage(char *str, size_t len, uint64_t memflags);

/**
 * kgsl_free_secure_page - Free a page from kgsl_alloc_secure_page()
 * @page: Pointer to the page to free
 */
void kgsl_free_secure_page(struct page *page);

/**
 * kgsl_alloc_secure_page - Allocate a single secure page
 *
 * Return: Pointer to the page, or NULL on failure
 * (NOTE(review): failure semantics inferred from pointer return — confirm)
 */
struct page *kgsl_alloc_secure_page(void);

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
/* Lock/unlock an sg_table for secure use (QCOM secure buffer support only) */
int kgsl_lock_sgt(struct sg_table *sgt, u64 size);
int kgsl_unlock_sgt(struct sg_table *sgt);
#endif
/**
 * kgsl_zero_page() - zero out a page
 * @p: pointer to the struct page
 * @order: order of the page
 * @dev: A &struct device pointer
 *
 * Map a page into kernel and zero it out
 */
void kgsl_zero_page(struct page *p, unsigned int order,
		struct device *dev);

/**
 * kgsl_gfp_mask() - get gfp_mask to be used
 * @page_order: order of the page
 *
 * Get the gfp_mask to be used for page allocation
 * based on the order of the page
 *
 * Return appropriate gfp_mask
 */
gfp_t kgsl_gfp_mask(int page_order);

/**
 * kgsl_allocate_user - Allocate user visible GPU memory
 * @device: A GPU device handle
 * @memdesc: Memory descriptor for the object
 * @size: Size of the allocation in bytes
 * @flags: Control flags for the allocation
 * @priv: Internal flags for the allocation
 *
 * Allocate GPU memory on behalf of the user.
 * Return: 0 on success or negative on failure.
 */
int kgsl_allocate_user(struct kgsl_device *device, struct kgsl_memdesc *memdesc,
		u64 size, u64 flags, u32 priv);

/**
 * kgsl_allocate_kernel - Allocate kernel visible GPU memory
 * @device: A GPU device handle
 * @memdesc: Memory descriptor for the object
 * @size: Size of the allocation in bytes
 * @flags: Control flags for the allocation
 * @priv: Internal flags for the allocation
 *
 * Allocate GPU memory for use by the kernel. Kernel objects are
 * automatically mapped into the kernel address space (except for secure).
 * Return: 0 on success or negative on failure.
 */
int kgsl_allocate_kernel(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv);

/**
 * kgsl_allocate_global - Allocate a global GPU memory object
 * @device: A GPU device handle
 * @size: Size of the allocation in bytes
 * @padding: Amount of extra padding to add to the VA allocation
 * @flags: Control flags for the allocation
 * @priv: Internal flags for the allocation
 * @name: Name of the allocation (for the debugfs file)
 *
 * Allocate a global GPU object for use by all processes. The buffer is
 * automatically mapped into the kernel address space and added to the list of
 * global buffers that get mapped into each newly created pagetable.
 * Return: The memory descriptor on success or a ERR_PTR encoded error on
 * failure.
 */
struct kgsl_memdesc *kgsl_allocate_global(struct kgsl_device *device,
		u64 size, u32 padding, u64 flags, u32 priv, const char *name);
/**
 * kgsl_allocate_global_fixed - Allocate a global GPU memory object from a fixed
 * region defined in the device tree
 * @device: A GPU device handle
 * @resource: Name of the device tree resource that defines the fixed region
 * @name: Name of the allocation (for the debugfs file)
 *
 * Allocate a global GPU object for use by all processes. The buffer is
 * added to the list of global buffers that get mapped into each newly created
 * pagetable.
 *
 * Return: The memory descriptor on success or a ERR_PTR encoded error on
 * failure.
 */
struct kgsl_memdesc *kgsl_allocate_global_fixed(struct kgsl_device *device,
		const char *resource, const char *name);

/**
 * kgsl_memdesc_init_fixed - Initializes a memory object from a fixed
 * region defined in the device tree
 * @device: A GPU device handle
 * @pdev: Pointer to the platform device
 * @resource: Name of the resource to get the size and address to allocate
 * @memdesc: Memory descriptor for the object
 *
 * Initializes a memory object for a fixed I/O region defined in device tree.
 *
 * Return: 0 on success or negative on failure.
 */
int kgsl_memdesc_init_fixed(struct kgsl_device *device,
		struct platform_device *pdev, const char *resource,
		struct kgsl_memdesc *memdesc);

/**
 * kgsl_free_globals - Free all global objects
 * @device: A GPU device handle
 *
 * Free all the global buffer objects. Should only be called during shutdown
 * after the pagetables have been freed
 */
void kgsl_free_globals(struct kgsl_device *device);

/**
 * kgsl_page_sync - Sync a page for DMA to/from the device
 * @dev: A GPU device handle
 * @page: Pointer to the struct page
 * @size: Size of the page
 * @dir: DMA direction flag
 */
void kgsl_page_sync(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir);
  177. /*
  178. * kgsl_memdesc_get_align - Get alignment flags from a memdesc
  179. * @memdesc - the memdesc
  180. *
  181. * Returns the alignment requested, as power of 2 exponent.
  182. */
  183. static inline int
  184. kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
  185. {
  186. return FIELD_GET(KGSL_MEMALIGN_MASK, memdesc->flags);
  187. }
  188. /*
  189. * kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
  190. * @memdesc: the memdesc
  191. *
  192. * Returns a KGSL_CACHEMODE* value.
  193. */
  194. static inline int
  195. kgsl_memdesc_get_cachemode(const struct kgsl_memdesc *memdesc)
  196. {
  197. return FIELD_GET(KGSL_CACHEMODE_MASK, memdesc->flags);
  198. }
/*
 * kgsl_memdesc_get_memtype - Get the memory type hint of a memdesc
 * @memdesc: the memdesc
 *
 * Returns the KGSL_MEMTYPE_* value encoded in the memdesc flags.
 */
static inline unsigned int
kgsl_memdesc_get_memtype(const struct kgsl_memdesc *memdesc)
{
	return FIELD_GET(KGSL_MEMTYPE_MASK, memdesc->flags);
}
  204. /*
  205. * kgsl_memdesc_set_align - Set alignment flags of a memdesc
  206. * @memdesc - the memdesc
  207. * @align - alignment requested, as a power of 2 exponent.
  208. */
  209. static inline int
  210. kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
  211. {
  212. if (align > 32)
  213. align = 32;
  214. memdesc->flags &= ~(uint64_t)KGSL_MEMALIGN_MASK;
  215. memdesc->flags |= FIELD_PREP(KGSL_MEMALIGN_MASK, align);
  216. return 0;
  217. }
  218. /**
  219. * kgsl_memdesc_usermem_type - return buffer type
  220. * @memdesc - the memdesc
  221. *
  222. * Returns a KGSL_MEM_ENTRY_* value for this buffer, which
  223. * identifies if was allocated by us, or imported from
  224. * another allocator.
  225. */
  226. static inline unsigned int
  227. kgsl_memdesc_usermem_type(const struct kgsl_memdesc *memdesc)
  228. {
  229. return FIELD_GET(KGSL_MEMFLAGS_USERMEM_MASK, memdesc->flags);
  230. }
/**
 * kgsl_memdesc_sg_dma - Turn a dma_addr (from CMA) into a sg table
 * @memdesc: Pointer to a memory descriptor
 * @addr: Physical address from the dma_alloc function
 * @size: Size of the chunk
 *
 * Create a sg table for the contiguous chunk specified by addr and size.
 *
 * Return: 0 on success or negative on failure.
 */
int kgsl_memdesc_sg_dma(struct kgsl_memdesc *memdesc,
		phys_addr_t addr, u64 size);
  243. /*
  244. * kgsl_memdesc_is_global - is this a globally mapped buffer?
  245. * @memdesc: the memdesc
  246. *
  247. * Return: True if this is a global mapping
  248. */
  249. static inline bool kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
  250. {
  251. return memdesc && (memdesc->priv & KGSL_MEMDESC_GLOBAL);
  252. }
  253. /*
  254. * kgsl_memdesc_is_secured - is this a secure buffer?
  255. * @memdesc: the memdesc
  256. *
  257. * Returns true if this is a secure mapping, false otherwise
  258. */
  259. static inline bool kgsl_memdesc_is_secured(const struct kgsl_memdesc *memdesc)
  260. {
  261. return memdesc && (memdesc->priv & KGSL_MEMDESC_SECURE);
  262. }
  263. /*
  264. * kgsl_memdesc_is_reclaimed - check if a buffer is reclaimed
  265. * @memdesc: the memdesc
  266. *
  267. * Return: true if the memdesc pages were reclaimed, false otherwise
  268. */
  269. static inline bool kgsl_memdesc_is_reclaimed(const struct kgsl_memdesc *memdesc)
  270. {
  271. return memdesc && (memdesc->priv & KGSL_MEMDESC_RECLAIMED);
  272. }
  273. /*
  274. * kgsl_memdesc_use_cpu_map - use the same virtual mapping on CPU and GPU?
  275. * @memdesc: the memdesc
  276. *
  277. * Return: true if the memdesc is using SVM mapping
  278. */
  279. static inline bool
  280. kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
  281. {
  282. return memdesc && (memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP);
  283. }
  284. /*
  285. * kgsl_memdesc_footprint - get the size of the mmap region
  286. * @memdesc - the memdesc
  287. *
  288. * The entire memdesc must be mapped. Additionally if the
  289. * CPU mapping is going to be mirrored, there must be room
  290. * for the guard page to be mapped so that the address spaces
  291. * match up.
  292. */
  293. static inline uint64_t
  294. kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc)
  295. {
  296. if (!(memdesc->priv & KGSL_MEMDESC_GUARD_PAGE))
  297. return memdesc->size;
  298. return PAGE_ALIGN(memdesc->size + PAGE_SIZE);
  299. }
  300. /**
  301. * kgsl_memdesc_put_gpuaddr - Release the gpuaddr assigned to a memdesc
  302. * @memdesc: Pointer to a GPU memory object
  303. *
  304. * Call the memdesc specific function to release the GPU address assigned to the
  305. * memdesc and unmap the memory
  306. */
  307. static inline void kgsl_sharedmem_put_gpuaddr(struct kgsl_memdesc *memdesc)
  308. {
  309. if (memdesc && memdesc->ops->put_gpuaddr)
  310. memdesc->ops->put_gpuaddr(memdesc);
  311. }
  312. /**
  313. * kgsl_cachemode_is_cached - Return true if the passed flags indicate a cached
  314. * buffer
  315. * @flags: A bitmask of KGSL_MEMDESC_ flags
  316. *
  317. * Return: true if the flags indicate a cached buffer
  318. */
  319. static inline bool kgsl_cachemode_is_cached(u64 flags)
  320. {
  321. u64 mode = FIELD_GET(KGSL_CACHEMODE_MASK, flags);
  322. return (mode != KGSL_CACHEMODE_UNCACHED &&
  323. mode != KGSL_CACHEMODE_WRITECOMBINE);
  324. }
/**
 * kgsl_unmap_and_put_gpuaddr - Unmap the memory and release the gpuaddr
 * assigned to a memdesc
 * @memdesc: Pointer to a GPU memory object
 *
 * Remove the mapping from pagetable and release the GPU address assigned
 * to the memdesc
 */
void kgsl_unmap_and_put_gpuaddr(struct kgsl_memdesc *memdesc);
/**
 * struct kgsl_process_attribute - basic attribute for a process
 * @attr: Underlying struct attribute
 * @show: Attribute show function
 * @store: Attribute store function
 */
struct kgsl_process_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct kgsl_process_attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct kgsl_process_attribute *attr, const char *buf,
			ssize_t count);
};

/*
 * PROCESS_ATTR - declare a static struct kgsl_process_attribute named
 * attr_<_name> with the given mode and show/store handlers
 */
#define PROCESS_ATTR(_name, _mode, _show, _store) \
static struct kgsl_process_attribute attr_##_name = \
	__ATTR(_name, _mode, _show, _store)
/**
 * struct kgsl_sharedmem_bind_op_range - One range in a VBO bind operation
 * @start: Start of the range
 * @last: Last address of the range
 *        (NOTE(review): inclusive/exclusive and units not visible in this
 *        header — confirm against the users of this struct)
 * @child_offset: Offset into the child memory entry
 * @op: Range operation to perform (presumably KGSL_GPUMEM_RANGE_OP_*)
 * @entry: Child memory entry covering the range
 */
struct kgsl_sharedmem_bind_op_range {
	u64 start;
	u64 last;
	u32 child_offset;
	u32 op;
	struct kgsl_mem_entry *entry;
};

/**
 * struct kgsl_sharedmem_bind_op - A set of range operations on a VBO
 * @target: Target virtual buffer object entry
 * @ops: Array of range operations to perform
 * @nr_ops: Number of entries in @ops
 * @callback: Optional callback invoked with this op
 * @data: Opaque pointer for the owner of the op
 * @work: Work struct used to run the operation asynchronously
 * @comp: Completion used to wait for the operation
 * @ref: Reference count for the op; released via
 *       kgsl_sharedmem_bind_range_destroy()
 */
struct kgsl_sharedmem_bind_op {
	struct kgsl_mem_entry *target;
	struct kgsl_sharedmem_bind_op_range *ops;
	int nr_ops;
	void (*callback)(struct kgsl_sharedmem_bind_op *op);
	void *data;
	struct work_struct work;
	struct completion comp;
	struct kref ref;
};
/**
 * kgsl_sharedmem_allocate_vbo - Allocate a new virtual buffer object
 * @device: A KGSL GPU handle
 * @memdesc: Memory descriptor container to initialize
 * @size: Size of the VBO
 * @flags: Bitmask of KGSL_MEMFLAGS_*
 *
 * Initialize a new virtual buffer object memory descriptor
 *
 * Return: 0 on success or negative on failure.
 */
int kgsl_sharedmem_allocate_vbo(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags);

/**
 * kgsl_memdesc_print_vbo_ranges - Print a virtual buffer object's ranges
 * @entry: A KGSL memory entry
 * @s: seq_file pointer
 *
 * Print virtual buffer object memory ranges
 */
void kgsl_memdesc_print_vbo_ranges(struct kgsl_mem_entry *entry,
		struct seq_file *s);

/**
 * kgsl_sharedmem_create_bind_op - Create a new bind op
 * @private: A KGSL process private
 * @target_id: Target virtual buffer object id
 * @ranges: User memory pointer to an array of range operations of type &struct
 * kgsl_gpumem_bind_range
 * @ranges_nents: Number of entries in @ranges
 * @ranges_size: Size of each entry in @ranges in bytes
 *
 * Create a new bind op to be used to map ranges
 *
 * Return: On success return kgsl_sharedmem_bind_op pointer or negative
 * on failure
 */
struct kgsl_sharedmem_bind_op *
kgsl_sharedmem_create_bind_op(struct kgsl_process_private *private,
		u32 target_id, void __user *ranges, u32 ranges_nents,
		u64 ranges_size);

/**
 * kgsl_sharedmem_bind_ranges - Bind ranges to virtual buffer object
 * @op: Pointer to a bind op from kgsl_sharedmem_create_bind_op()
 *
 * Add or remove each range in @op from the kgsl memory descriptor
 */
void kgsl_sharedmem_bind_ranges(struct kgsl_sharedmem_bind_op *op);

/**
 * kgsl_sharedmem_bind_range_destroy - Destroy a bind op
 * @kref: Embedded kref of the &struct kgsl_sharedmem_bind_op to destroy
 *
 * Destroy bind ranges object; called when the last reference is put
 */
void kgsl_sharedmem_bind_range_destroy(struct kref *kref);
/**
 * kgsl_sharedmem_put_bind_op - Put a reference on a bind op
 * @op: Pointer to a bind op, or a NULL/ERR_PTR value which is ignored
 *
 * Drop a reference on @op; kgsl_sharedmem_bind_range_destroy() frees the
 * resources when the last reference is put
 */
static inline void kgsl_sharedmem_put_bind_op(struct kgsl_sharedmem_bind_op *op)
{
	/* Tolerate NULL or ERR_PTR results from a failed create call */
	if (!IS_ERR_OR_NULL(op))
		kref_put(&op->ref, kgsl_sharedmem_bind_range_destroy);
}
/**
 * kgsl_register_shmem_callback - Register vendor hook callback with SHMEM
 * driver
 */
void kgsl_register_shmem_callback(void);
  439. #endif /* __KGSL_SHAREDMEM_H */