/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __KGSL_MMU_H
#define __KGSL_MMU_H

#include <linux/platform_device.h>

/* Identifier for the global page table */
/*
 * Per process page tables will probably pass in the thread group
 * as an identifier
 */
#define KGSL_MMU_GLOBAL_PT 0
#define KGSL_MMU_SECURE_PT 1

#define MMU_DEFAULT_TTBR0(_d) \
	(kgsl_mmu_pagetable_get_ttbr0((_d)->mmu.defaultpagetable))

/* Mask off the ASID field to recover the 48-bit pagetable base address */
#define MMU_SW_PT_BASE(_ttbr0) \
	(_ttbr0 & (BIT_ULL(KGSL_IOMMU_ASID_START_BIT) - 1))
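/*
 * Example (illustrative, not part of the driver): given a raw TTBR0 value
 * read back from the hardware, the software pagetable base is recovered
 * by masking off the ASID field that lives in the upper bits:
 *
 *	u64 ttbr0 = kgsl_mmu_get_current_ttbr0(mmu, context);
 *	u64 ptbase = MMU_SW_PT_BASE(ttbr0);
 */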
#define KGSL_MMU_DEVICE(_mmu) \
	container_of((_mmu), struct kgsl_device, mmu)
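/*
 * Example (illustrative): since struct kgsl_mmu is embedded in
 * struct kgsl_device, a backend callback that only receives the mmu
 * pointer can recover the owning device:
 *
 *	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
 */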
/**
 * enum kgsl_ft_pagefault_policy_bits - KGSL pagefault policy bits
 * @KGSL_FT_PAGEFAULT_INT_ENABLE: No longer used, but retained for compatibility
 * @KGSL_FT_PAGEFAULT_GPUHALT_ENABLE: enable GPU halt on pagefaults
 * @KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE: log one pagefault per page
 * @KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT: log one pagefault per interrupt
 */
enum {
	KGSL_FT_PAGEFAULT_INT_ENABLE = 0,
	KGSL_FT_PAGEFAULT_GPUHALT_ENABLE = 1,
	KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE = 2,
	KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT = 3,
	/* KGSL_FT_PAGEFAULT_MAX_BITS is used to calculate the mask */
	KGSL_FT_PAGEFAULT_MAX_BITS,
};

#define KGSL_FT_PAGEFAULT_MASK GENMASK(KGSL_FT_PAGEFAULT_MAX_BITS - 1, 0)
#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY 0
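/*
 * Example (illustrative): the enum values above are bit *positions*, so a
 * policy that halts the GPU and logs at most one fault per page would be
 * composed with BIT() and clamped to the valid mask:
 *
 *	unsigned long policy = BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) |
 *			BIT(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE);
 *
 *	kgsl_mmu_set_pagefault_policy(mmu, policy & KGSL_FT_PAGEFAULT_MASK);
 */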
struct kgsl_device;

enum kgsl_mmutype {
	KGSL_MMU_TYPE_IOMMU = 0,
	KGSL_MMU_TYPE_NONE
};

#define KGSL_IOMMU_SMMU_V500 1

struct kgsl_pagetable {
	spinlock_t lock;
	struct kref refcount;
	struct list_head list;
	unsigned int name;
	struct kobject *kobj;
	struct work_struct destroy_ws;
	struct {
		atomic_t entries;
		atomic_long_t mapped;
		atomic_long_t max_mapped;
	} stats;
	const struct kgsl_mmu_pt_ops *pt_ops;
	uint64_t fault_addr;
	struct kgsl_mmu *mmu;
	/** @rbtree: all buffers mapped into the pagetable, indexed by gpuaddr */
	struct rb_root rbtree;
	/** @va_start: Start of virtual range used in this pagetable */
	u64 va_start;
	/** @va_end: End of virtual range */
	u64 va_end;
	/**
	 * @svm_start: Start of shared virtual memory range. Addresses in this
	 * range are also valid in the process's CPU address space.
	 */
	u64 svm_start;
	/** @svm_end: End of the 32-bit compatible SVM range */
	u64 svm_end;
	/**
	 * @compat_va_start: Start of the "compat" virtual address range for
	 * forced 32-bit allocations
	 */
	u64 compat_va_start;
	/**
	 * @compat_va_end: End of the "compat" virtual address range for
	 * forced 32-bit allocations
	 */
	u64 compat_va_end;
	/** @va_hint: Virtual address hint for 64-bit non-SVM allocations */
	u64 va_hint;
	u64 global_base;
};

struct kgsl_mmu;

struct kgsl_mmu_ops {
	void (*mmu_close)(struct kgsl_mmu *mmu);
	int (*mmu_start)(struct kgsl_mmu *mmu);
	uint64_t (*mmu_get_current_ttbr0)(struct kgsl_mmu *mmu, struct kgsl_context *context);
	void (*mmu_pagefault_resume)(struct kgsl_mmu *mmu, bool terminate);
	void (*mmu_clear_fsr)(struct kgsl_mmu *mmu);
	void (*mmu_enable_clk)(struct kgsl_mmu *mmu);
	void (*mmu_disable_clk)(struct kgsl_mmu *mmu);
	int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned long pf_policy);
	int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
	struct kgsl_pagetable *(*mmu_getpagetable)(struct kgsl_mmu *mmu,
			unsigned long name);
	void (*mmu_map_global)(struct kgsl_mmu *mmu,
			struct kgsl_memdesc *memdesc, u32 padding);
	void (*mmu_send_tlb_hint)(struct kgsl_mmu *mmu, bool hint);
};

struct kgsl_mmu_pt_ops {
	int (*mmu_map)(struct kgsl_pagetable *pt,
			struct kgsl_memdesc *memdesc);
	int (*mmu_map_child)(struct kgsl_pagetable *pt,
			struct kgsl_memdesc *memdesc, u64 offset,
			struct kgsl_memdesc *child, u64 child_offset,
			u64 length);
	int (*mmu_map_zero_page_to_range)(struct kgsl_pagetable *pt,
			struct kgsl_memdesc *memdesc, u64 start, u64 length);
	int (*mmu_unmap)(struct kgsl_pagetable *pt,
			struct kgsl_memdesc *memdesc);
	int (*mmu_unmap_range)(struct kgsl_pagetable *pt,
			struct kgsl_memdesc *memdesc, u64 offset, u64 length);
	void (*mmu_destroy_pagetable)(struct kgsl_pagetable *pt);
	u64 (*get_ttbr0)(struct kgsl_pagetable *pt);
	int (*get_context_bank)(struct kgsl_pagetable *pt, struct kgsl_context *context);
	int (*get_asid)(struct kgsl_pagetable *pt, struct kgsl_context *context);
	int (*get_gpuaddr)(struct kgsl_pagetable *pt,
			struct kgsl_memdesc *memdesc);
	void (*put_gpuaddr)(struct kgsl_memdesc *memdesc);
	uint64_t (*find_svm_region)(struct kgsl_pagetable *pt, uint64_t start,
			uint64_t end, uint64_t size, uint64_t align);
	int (*set_svm_region)(struct kgsl_pagetable *pt,
			uint64_t gpuaddr, uint64_t size);
	int (*svm_range)(struct kgsl_pagetable *pt, uint64_t *lo, uint64_t *hi,
			uint64_t memflags);
	bool (*addr_in_range)(struct kgsl_pagetable *pagetable,
			uint64_t gpuaddr, uint64_t size);
};

enum kgsl_mmu_feature {
	/** @KGSL_MMU_64BIT: Use 64 bit virtual address space */
	KGSL_MMU_64BIT,
	/** @KGSL_MMU_PAGED: Support paged memory */
	KGSL_MMU_PAGED,
	/**
	 * @KGSL_MMU_NEED_GUARD_PAGE: Set if a guard page is needed for each
	 * mapped region
	 */
	KGSL_MMU_NEED_GUARD_PAGE,
	/** @KGSL_MMU_IO_COHERENT: Set if a device supports I/O coherency */
	KGSL_MMU_IO_COHERENT,
	/** @KGSL_MMU_LLCC_ENABLE: Set if LLCC is activated for the target */
	KGSL_MMU_LLCC_ENABLE,
	/** @KGSL_MMU_SMMU_APERTURE: Set the SMMU aperture */
	KGSL_MMU_SMMU_APERTURE,
	/**
	 * @KGSL_MMU_IOPGTABLE: Set if the qcom,adreno-smmu implementation is
	 * available. Implies split address space and per-process pagetables
	 */
	KGSL_MMU_IOPGTABLE,
	/** @KGSL_MMU_SUPPORT_VBO: Non-secure VBOs are supported */
	KGSL_MMU_SUPPORT_VBO,
	/** @KGSL_MMU_PAGEFAULT_TERMINATE: Set to make pagefaults fatal */
	KGSL_MMU_PAGEFAULT_TERMINATE,
};
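/*
 * Example (illustrative): the features above are bit positions stored in
 * the kgsl_mmu::features bitmap, so they are queried with the standard
 * bitops helpers (see kgsl_mmu_is_perprocess() below for a real user):
 *
 *	if (test_bit(KGSL_MMU_64BIT, &mmu->features))
 *		... use the 64-bit VA layout ...
 */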
#include "kgsl_iommu.h"

/**
 * struct kgsl_mmu - Master definition for KGSL MMU devices
 * @flags: MMU device flags
 * @type: Type of MMU that is attached
 * @subtype: Sub type of MMU that is attached
 * @defaultpagetable: Default pagetable object for the MMU
 * @securepagetable: Default secure pagetable object for the MMU
 * @mmu_ops: Function pointers for the MMU sub-type
 * @secured: True if the MMU needs to be secured
 * @features: Static bitmap of MMU features
 */
struct kgsl_mmu {
	unsigned long flags;
	enum kgsl_mmutype type;
	u32 subtype;
	struct kgsl_pagetable *defaultpagetable;
	struct kgsl_pagetable *securepagetable;
	const struct kgsl_mmu_ops *mmu_ops;
	bool secured;
	unsigned long features;
	/** @pfpolicy: The current pagefault policy for the device */
	unsigned long pfpolicy;
	/** @iommu: The embedded IOMMU sub-device */
	struct kgsl_iommu iommu;
};

#define KGSL_IOMMU(d) (&((d)->mmu.iommu))
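/*
 * Example (illustrative): KGSL_IOMMU() is the convenience accessor for
 * the embedding above, fetching the IOMMU sub-device from a kgsl_device:
 *
 *	struct kgsl_iommu *iommu = KGSL_IOMMU(device);
 */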
int __init kgsl_mmu_init(void);
void kgsl_mmu_exit(void);
int kgsl_mmu_start(struct kgsl_device *device);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc);
int kgsl_mmu_map_child(struct kgsl_pagetable *pt,
		struct kgsl_memdesc *memdesc, u64 offset,
		struct kgsl_memdesc *child, u64 child_offset,
		u64 length);
int kgsl_mmu_map_zero_page_to_range(struct kgsl_pagetable *pt,
		struct kgsl_memdesc *memdesc, u64 start, u64 length);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc);
int kgsl_mmu_unmap_range(struct kgsl_pagetable *pt,
		struct kgsl_memdesc *memdesc, u64 offset, u64 length);
unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
		u64 ttbr0, uint64_t addr);
bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, uint64_t gpuaddr,
		uint64_t size);
uint64_t kgsl_mmu_find_svm_region(struct kgsl_pagetable *pagetable,
		uint64_t start, uint64_t end, uint64_t size,
		uint64_t alignment);
int kgsl_mmu_set_svm_region(struct kgsl_pagetable *pagetable, uint64_t gpuaddr,
		uint64_t size);
void kgsl_mmu_detach_pagetable(struct kgsl_pagetable *pagetable);
int kgsl_mmu_svm_range(struct kgsl_pagetable *pagetable,
		uint64_t *lo, uint64_t *hi, uint64_t memflags);
struct kgsl_pagetable *kgsl_get_pagetable(unsigned long name);

/*
 * Static inline MMU helpers that simply call the SMMU-specific
 * implementation through a function pointer. These functions can be
 * thought of as wrappers around the actual function.
 */
#define MMU_OP_VALID(_mmu, _field) \
	(((_mmu) != NULL) && \
	 ((_mmu)->mmu_ops != NULL) && \
	 ((_mmu)->mmu_ops->_field != NULL))

#define PT_OP_VALID(_pt, _field) \
	(((_pt) != NULL) && \
	 ((_pt)->pt_ops != NULL) && \
	 ((_pt)->pt_ops->_field != NULL))
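/*
 * Example (illustrative): because both the ops table and the individual
 * callback are NULL-checked, callers can use the wrappers below without
 * knowing which MMU backend (if any) is attached:
 *
 *	kgsl_mmu_enable_clk(&device->mmu);
 *	...
 *	kgsl_mmu_disable_clk(&device->mmu);
 */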
/**
 * kgsl_mmu_get_gpuaddr - Assign a GPU address to the memdesc
 * @pagetable: GPU pagetable to assign the address in
 * @memdesc: mem descriptor to assign the memory to
 *
 * Return: 0 on success or negative on failure
 */
static inline int kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	if (PT_OP_VALID(pagetable, get_gpuaddr))
		return pagetable->pt_ops->get_gpuaddr(pagetable, memdesc);

	return -ENOMEM;
}

/**
 * kgsl_mmu_put_gpuaddr - Remove a GPU address from a pagetable
 * @pagetable: Pagetable to release the memory from
 * @memdesc: Memory descriptor containing the GPU address to free
 *
 * Release a GPU address in the MMU virtual address space.
 */
static inline void kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	if (PT_OP_VALID(pagetable, put_gpuaddr))
		pagetable->pt_ops->put_gpuaddr(memdesc);
}
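/*
 * Example (illustrative, assuming the usual allocate-then-map ordering):
 * a map path would pair these two helpers, releasing the virtual range
 * again if the map step fails:
 *
 *	ret = kgsl_mmu_get_gpuaddr(pagetable, memdesc);
 *	if (ret)
 *		return ret;
 *
 *	ret = kgsl_mmu_map(pagetable, memdesc);
 *	if (ret)
 *		kgsl_mmu_put_gpuaddr(pagetable, memdesc);
 */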
static inline u64 kgsl_mmu_get_current_ttbr0(struct kgsl_mmu *mmu,
		struct kgsl_context *context)
{
	if (MMU_OP_VALID(mmu, mmu_get_current_ttbr0))
		return mmu->mmu_ops->mmu_get_current_ttbr0(mmu, context);

	return 0;
}

static inline struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu,
		unsigned long name)
{
	if (MMU_OP_VALID(mmu, mmu_getpagetable))
		return mmu->mmu_ops->mmu_getpagetable(mmu, name);

	return NULL;
}

static inline void kgsl_mmu_enable_clk(struct kgsl_mmu *mmu)
{
	if (MMU_OP_VALID(mmu, mmu_enable_clk))
		mmu->mmu_ops->mmu_enable_clk(mmu);
}

static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
{
	if (MMU_OP_VALID(mmu, mmu_disable_clk))
		mmu->mmu_ops->mmu_disable_clk(mmu);
}

static inline int kgsl_mmu_set_pagefault_policy(struct kgsl_mmu *mmu,
		unsigned long pf_policy)
{
	if (MMU_OP_VALID(mmu, mmu_set_pf_policy))
		return mmu->mmu_ops->mmu_set_pf_policy(mmu, pf_policy);

	return 0;
}

static inline void kgsl_mmu_pagefault_resume(struct kgsl_mmu *mmu,
		bool terminate)
{
	if (MMU_OP_VALID(mmu, mmu_pagefault_resume))
		mmu->mmu_ops->mmu_pagefault_resume(mmu, terminate);
}

static inline void kgsl_mmu_clear_fsr(struct kgsl_mmu *mmu)
{
	if (MMU_OP_VALID(mmu, mmu_clear_fsr))
		mmu->mmu_ops->mmu_clear_fsr(mmu);
}

static inline bool kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
{
	return test_bit(KGSL_MMU_IOPGTABLE, &mmu->features);
}

static inline bool kgsl_mmu_is_secured(struct kgsl_mmu *mmu)
{
	return mmu && mmu->secured && !IS_ERR_OR_NULL(mmu->securepagetable);
}

static inline u64
kgsl_mmu_pagetable_get_ttbr0(struct kgsl_pagetable *pagetable)
{
	if (PT_OP_VALID(pagetable, get_ttbr0))
		return pagetable->pt_ops->get_ttbr0(pagetable);

	return 0;
}

static inline void kgsl_mmu_send_tlb_hint(struct kgsl_mmu *mmu, bool hint)
{
	if (MMU_OP_VALID(mmu, mmu_send_tlb_hint))
		mmu->mmu_ops->mmu_send_tlb_hint(mmu, hint);
}

/**
 * kgsl_mmu_map_global - Map a memdesc as a global buffer
 * @device: A KGSL GPU device handle
 * @memdesc: Pointer to a GPU memory descriptor
 * @padding: Any padding to add to the end of the VA allotment (in bytes)
 *
 * Map a buffer as globally accessible in all pagetable contexts.
 */
void kgsl_mmu_map_global(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u32 padding);

/**
 * kgsl_mmu_pagetable_get_context_bank - Return the context bank number
 * @pagetable: A handle to a given pagetable
 * @context: LPAC or user context
 *
 * Find the context bank number for the given pagetable.
 *
 * Return: The context bank number the pagetable is attached to, or a
 * negative error code on failure.
 */
int kgsl_mmu_pagetable_get_context_bank(struct kgsl_pagetable *pagetable,
		struct kgsl_context *context);

/**
 * kgsl_mmu_pagetable_get_asid - Return the ASID number
 * @pagetable: A handle to a given pagetable
 * @context: LPAC or GC context
 *
 * Find the ASID number for the given pagetable.
 *
 * Return: The ASID number the pagetable is attached to, or a negative
 * error code on failure.
 */
int kgsl_mmu_pagetable_get_asid(struct kgsl_pagetable *pagetable,
		struct kgsl_context *context);

void kgsl_mmu_pagetable_init(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable, u32 name);

void kgsl_mmu_pagetable_add(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable);

#if IS_ENABLED(CONFIG_ARM_SMMU)
int kgsl_iommu_bind(struct kgsl_device *device, struct platform_device *pdev);
#else
static inline int kgsl_iommu_bind(struct kgsl_device *device,
		struct platform_device *pdev)
{
	return -ENODEV;
}
#endif

/**
 * kgsl_mmu_map_sg - Map the given scatterlist to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping in
 * @iova: The start address to map the buffer at
 * @sg: The scatterlist describing the buffer
 * @nents: Number of entries in @sg
 * @prot: IOMMU protection bits
 *
 * Create a mapping at @iova in @domain for the buffer described by the
 * scatterlist @sg.
 */
ssize_t kgsl_mmu_map_sg(struct iommu_domain *domain,
		unsigned long iova, struct scatterlist *sg,
		unsigned int nents, int prot);
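/*
 * Example (illustrative, assuming the ssize_t return is the number of
 * bytes mapped): mapping a buffer described by a previously built
 * sg_table with read/write permissions, where "size" is the total buffer
 * length:
 *
 *	ssize_t mapped = kgsl_mmu_map_sg(domain, iova, sgt->sgl,
 *			sgt->nents, IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < (ssize_t)size)
 *		... unmap and bail out ...
 */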
#endif /* __KGSL_MMU_H */