kgsl_mmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/component.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/version.h>

#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

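/*
 * Pagetable teardown is deferred to a worker: the final kref_put() may
 * happen in a context that cannot sleep, while destroying the pagetable
 * itself may need to.
 */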
static void _deferred_destroy(struct work_struct *ws)
{
	struct kgsl_pagetable *pagetable = container_of(ws,
			struct kgsl_pagetable, destroy_ws);

	WARN_ON(!list_empty(&pagetable->list));

	pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
}

static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
			struct kgsl_pagetable, refcount);

	kgsl_mmu_detach_pagetable(pagetable);

	kgsl_schedule_work(&pagetable->destroy_ws);
}

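/**
 * kgsl_get_pagetable() - Find a pagetable by name and take a reference
 * @name: Name (id) of the pagetable to look up
 *
 * Returns the matching pagetable with its refcount elevated, or NULL if
 * no pagetable with @name exists. The caller must drop the reference
 * with kgsl_mmu_putpagetable().
 */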
struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (name == pt->name && kref_get_unless_zero(&pt->refcount)) {
			ret = pt;
			break;
		}
	}
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	return ret;
}

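/*
 * Each pagetable's sysfs directory is named after the pagetable id (see
 * pagetable_add_sysfs_objects()), so the id can be parsed back out of
 * the kobject name. Returns a reference that the caller must drop with
 * kref_put().
 */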
static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned int ptname;

	if (!kobj)
		return NULL;

	if (kstrtou32(kobj->name, 0, &ptname))
		return NULL;

	return kgsl_get_pagetable(ptname);
}

static ssize_t
sysfs_show_entries(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		unsigned int val = atomic_read(&pt->stats.entries);

		ret += scnprintf(buf, PAGE_SIZE, "%d\n", val);

		/* Only drop the reference if the lookup actually took one */
		kref_put(&pt->refcount, kgsl_destroy_pagetable);
	}

	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		uint64_t val = atomic_long_read(&pt->stats.mapped);

		ret += scnprintf(buf, PAGE_SIZE, "%llu\n", val);

		kref_put(&pt->refcount, kgsl_destroy_pagetable);
	}

	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		struct kobj_attribute *attr,
		char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		uint64_t val = atomic_long_read(&pt->stats.max_mapped);

		ret += scnprintf(buf, PAGE_SIZE, "%llu\n", val);

		kref_put(&pt->refcount, kgsl_destroy_pagetable);
	}

	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_max_mapped.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				&pagetable_attr_group);

	kobject_put(pagetable->kobj);
	pagetable->kobj = NULL;
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	/* name is a u32, so print it unsigned */
	snprintf(ptname, sizeof(ptname), "%u", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
			kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

#ifdef CONFIG_TRACE_GPU_MEM
static void kgsl_mmu_trace_gpu_mem_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable->name == KGSL_MMU_GLOBAL_PT ||
		pagetable->name == KGSL_MMU_SECURE_PT)
		return;

	trace_gpu_mem_total(0, pagetable->name,
		(u64)atomic_long_read(&pagetable->stats.mapped));
}
#else
static void kgsl_mmu_trace_gpu_mem_pagetable(struct kgsl_pagetable *pagetable)
{
}
#endif

void
kgsl_mmu_detach_pagetable(struct kgsl_pagetable *pagetable)
{
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);

	if (!list_empty(&pagetable->list))
		list_del_init(&pagetable->list);

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);
}

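/**
 * kgsl_mmu_log_fault_addr() - Throttle logging of repeated pagefaults
 * @mmu: KGSL MMU that took the fault
 * @pt_base: Pagetable base (TTBR0) of the faulting pagetable
 * @addr: Faulting GPU address
 *
 * Remembers the last faulting page per pagetable so that repeated faults
 * on the same page are reported only once. Returns 1 if this page has
 * already been logged and 0 otherwise.
 */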
unsigned int
kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, u64 pt_base,
		uint64_t addr)
{
	struct kgsl_pagetable *pt;
	unsigned int ret = 0;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (kgsl_mmu_pagetable_get_ttbr0(pt) == MMU_SW_PT_BASE(pt_base)) {
			if ((addr & ~(PAGE_SIZE - 1)) == pt->fault_addr) {
				ret = 1;
				break;
			}
			pt->fault_addr = (addr & ~(PAGE_SIZE - 1));
			ret = 0;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ret;
}

int kgsl_mmu_start(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (MMU_OP_VALID(mmu, mmu_start))
		return mmu->mmu_ops->mmu_start(mmu);

	return 0;
}

void kgsl_mmu_pagetable_init(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable, u32 name)
{
	kref_init(&pagetable->refcount);
	spin_lock_init(&pagetable->lock);
	INIT_WORK(&pagetable->destroy_ws, _deferred_destroy);

	pagetable->mmu = mmu;
	pagetable->name = name;

	atomic_set(&pagetable->stats.entries, 0);
	atomic_long_set(&pagetable->stats.mapped, 0);
	atomic_long_set(&pagetable->stats.max_mapped, 0);
}

void kgsl_mmu_pagetable_add(struct kgsl_mmu *mmu, struct kgsl_pagetable *pagetable)
{
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	if (!IS_ERR_OR_NULL(pagetable))
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

/**
 * kgsl_mmu_find_svm_region() - Find an empty spot in the SVM region
 * @pagetable: KGSL pagetable to search
 * @start: start of search range, must be within kgsl_mmu_svm_range()
 * @end: end of search range, must be within kgsl_mmu_svm_range()
 * @size: Size of the region to find
 * @align: Desired alignment of the address
 */
uint64_t kgsl_mmu_find_svm_region(struct kgsl_pagetable *pagetable,
		uint64_t start, uint64_t end, uint64_t size,
		uint64_t align)
{
	if (PT_OP_VALID(pagetable, find_svm_region))
		return pagetable->pt_ops->find_svm_region(pagetable, start,
			end, size, align);

	return -ENOMEM;
}

/**
 * kgsl_mmu_set_svm_region() - Check if a region is empty and reserve it if so
 * @pagetable: KGSL pagetable to search
 * @gpuaddr: GPU address to check/reserve
 * @size: Size of the region to check/reserve
 */
int kgsl_mmu_set_svm_region(struct kgsl_pagetable *pagetable, uint64_t gpuaddr,
		uint64_t size)
{
	if (PT_OP_VALID(pagetable, set_svm_region))
		return pagetable->pt_ops->set_svm_region(pagetable, gpuaddr,
			size);

	return -ENOMEM;
}

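/**
 * kgsl_mmu_map() - Map a memory descriptor into a pagetable
 * @pagetable: KGSL pagetable to map into
 * @memdesc: Memory descriptor to map; must already have a GPU address
 *
 * Maps the backing pages and updates the pagetable statistics. Virtual
 * buffer objects must be populated through kgsl_mmu_map_child() or
 * kgsl_mmu_map_zero_page_to_range() instead. Returns 0 on success or a
 * negative error code on failure.
 */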
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	u64 size;
	struct kgsl_device *device = KGSL_MMU_DEVICE(pagetable->mmu);

	if (!memdesc->gpuaddr)
		return -EINVAL;

	/* Only global mappings should be mapped multiple times */
	if (!kgsl_memdesc_is_global(memdesc) &&
			(KGSL_MEMDESC_MAPPED & memdesc->priv))
		return -EINVAL;

	if (memdesc->flags & KGSL_MEMFLAGS_VBO)
		return -EINVAL;

	/* Use u64 so the footprint of large buffers isn't truncated */
	size = kgsl_memdesc_footprint(memdesc);

	if (PT_OP_VALID(pagetable, mmu_map)) {
		int ret;

		ret = pagetable->pt_ops->mmu_map(pagetable, memdesc);
		if (ret)
			return ret;

		atomic_inc(&pagetable->stats.entries);
		KGSL_STATS_ADD(size, &pagetable->stats.mapped,
				&pagetable->stats.max_mapped);
		kgsl_mmu_trace_gpu_mem_pagetable(pagetable);

		if (!kgsl_memdesc_is_global(memdesc) &&
				!(memdesc->flags & KGSL_MEMFLAGS_USERMEM_ION)) {
			kgsl_trace_gpu_mem_total(device, size);
		}

		memdesc->priv |= KGSL_MEMDESC_MAPPED;
	}

	return 0;
}

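/**
 * kgsl_mmu_map_child() - Map part of a child buffer into a virtual buffer object
 * @pt: KGSL pagetable to map into
 * @memdesc: Virtual buffer object (VBO) to map into
 * @offset: Offset into @memdesc at which to map
 * @child: Child memory descriptor that provides the backing pages
 * @child_offset: Offset into @child to start mapping from
 * @length: Number of bytes to map
 *
 * Returns 0 on success or a negative error code on failure.
 */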
int kgsl_mmu_map_child(struct kgsl_pagetable *pt,
		struct kgsl_memdesc *memdesc, u64 offset,
		struct kgsl_memdesc *child, u64 child_offset,
		u64 length)
{
	/* This only makes sense for virtual buffer objects */
	if (!(memdesc->flags & KGSL_MEMFLAGS_VBO))
		return -EINVAL;

	if (!memdesc->gpuaddr)
		return -EINVAL;

	if (PT_OP_VALID(pt, mmu_map_child)) {
		int ret;

		ret = pt->pt_ops->mmu_map_child(pt, memdesc,
			offset, child, child_offset, length);
		if (ret)
			return ret;

		KGSL_STATS_ADD(length, &pt->stats.mapped,
				&pt->stats.max_mapped);
	}

	return 0;
}

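/**
 * kgsl_mmu_map_zero_page_to_range() - Back a range of a VBO with the zero page
 * @pt: KGSL pagetable to map into
 * @memdesc: Virtual buffer object (VBO) to map into
 * @start: Offset into @memdesc of the range to map
 * @length: Number of bytes to map
 *
 * Returns 0 on success or a negative error code on failure.
 */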
int kgsl_mmu_map_zero_page_to_range(struct kgsl_pagetable *pt,
		struct kgsl_memdesc *memdesc, u64 start, u64 length)
{
	int ret = -EINVAL;

	/* This only makes sense for virtual buffer objects */
	if (!(memdesc->flags & KGSL_MEMFLAGS_VBO))
		return -EINVAL;

	if (!memdesc->gpuaddr)
		return -EINVAL;

	if (PT_OP_VALID(pt, mmu_map_zero_page_to_range)) {
		ret = pt->pt_ops->mmu_map_zero_page_to_range(pt,
			memdesc, start, length);
		if (ret)
			return ret;

		KGSL_STATS_ADD(length, &pt->stats.mapped,
				&pt->stats.max_mapped);
	}

	return 0;
}

/**
 * kgsl_mmu_svm_range() - Return the range for SVM (if applicable)
 * @pagetable: Pagetable to query the range from
 * @lo: Pointer to store the start of the SVM range
 * @hi: Pointer to store the end of the SVM range
 * @memflags: Flags from the buffer we are mapping
 */
int kgsl_mmu_svm_range(struct kgsl_pagetable *pagetable,
		uint64_t *lo, uint64_t *hi, uint64_t memflags)
{
	if (PT_OP_VALID(pagetable, svm_range))
		return pagetable->pt_ops->svm_range(pagetable, lo, hi,
			memflags);

	return -ENODEV;
}

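/**
 * kgsl_mmu_unmap() - Remove a memory descriptor's mapping
 * @pagetable: KGSL pagetable to unmap from
 * @memdesc: Previously mapped memory descriptor
 *
 * Undoes kgsl_mmu_map() and updates the pagetable statistics. Virtual
 * buffer objects are unmapped with kgsl_mmu_unmap_range() instead.
 * Returns 0 on success or a negative error code on failure.
 */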
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	int ret = 0;
	struct kgsl_device *device = KGSL_MMU_DEVICE(pagetable->mmu);

	if (memdesc->size == 0)
		return -EINVAL;

	if ((memdesc->flags & KGSL_MEMFLAGS_VBO))
		return -EINVAL;

	/* Only global mappings should be mapped multiple times */
	if (!(KGSL_MEMDESC_MAPPED & memdesc->priv))
		return -EINVAL;

	if (PT_OP_VALID(pagetable, mmu_unmap)) {
		uint64_t size;

		size = kgsl_memdesc_footprint(memdesc);

		ret = pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
		if (ret)
			return ret;

		atomic_dec(&pagetable->stats.entries);
		atomic_long_sub(size, &pagetable->stats.mapped);
		kgsl_mmu_trace_gpu_mem_pagetable(pagetable);

		if (!kgsl_memdesc_is_global(memdesc)) {
			memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
			if (!(memdesc->flags & KGSL_MEMFLAGS_USERMEM_ION))
				kgsl_trace_gpu_mem_total(device, -(size));
		}
	}

	return ret;
}

int
kgsl_mmu_unmap_range(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc, u64 offset, u64 length)
{
	int ret = 0;

	/* Only allow virtual buffer objects to use this function */
	if (!(memdesc->flags & KGSL_MEMFLAGS_VBO))
		return -EINVAL;

	if (PT_OP_VALID(pagetable, mmu_unmap_range)) {
		ret = pagetable->pt_ops->mmu_unmap_range(pagetable, memdesc,
			offset, length);

		if (!ret)
			atomic_long_sub(length, &pagetable->stats.mapped);
	}

	return ret;
}

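/**
 * kgsl_mmu_map_global() - Map a buffer into the global pagetable
 * @device: KGSL device handle
 * @memdesc: Memory descriptor to map
 * @padding: Extra virtual address space to reserve with the mapping
 *           (assumed to be in bytes)
 */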
void kgsl_mmu_map_global(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u32 padding)
{
	struct kgsl_mmu *mmu = &(device->mmu);

	if (MMU_OP_VALID(mmu, mmu_map_global))
		mmu->mmu_ops->mmu_map_global(mmu, memdesc, padding);
}

int kgsl_mmu_pagetable_get_context_bank(struct kgsl_pagetable *pagetable,
		struct kgsl_context *context)
{
	if (PT_OP_VALID(pagetable, get_context_bank))
		return pagetable->pt_ops->get_context_bank(pagetable, context);

	return -ENOENT;
}

int kgsl_mmu_pagetable_get_asid(struct kgsl_pagetable *pagetable,
		struct kgsl_context *context)
{
	if (PT_OP_VALID(pagetable, get_asid))
		return pagetable->pt_ops->get_asid(pagetable, context);

	return -ENOENT;
}

enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device)
{
	return device ? device->mmu.type : KGSL_MMU_TYPE_NONE;
}

bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
		uint64_t gpuaddr, uint64_t size)
{
	if (PT_OP_VALID(pagetable, addr_in_range))
		return pagetable->pt_ops->addr_in_range(pagetable, gpuaddr, size);

	return false;
}

/*
 * NOMMU definitions - NOMMU really just means that the MMU is kept in
 * pass-through and the GPU directly accesses physical memory. Used in
 * debug mode and when a real MMU isn't up and running yet.
 */

static bool nommu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
		uint64_t gpuaddr, uint64_t size)
{
	return gpuaddr != 0;
}

static int nommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	if (WARN_ONCE(memdesc->sgt->nents > 1,
		"Attempt to map non-contiguous memory with NOMMU\n"))
		return -EINVAL;

	/* In pass-through the GPU address is just the physical address */
	memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);

	if (memdesc->gpuaddr) {
		memdesc->pagetable = pagetable;
		return 0;
	}

	return -ENOMEM;
}

static void nommu_destroy_pagetable(struct kgsl_pagetable *pt)
{
	kfree(pt);
}

static const struct kgsl_mmu_pt_ops nommu_pt_ops = {
	.get_gpuaddr = nommu_get_gpuaddr,
	.addr_in_range = nommu_gpuaddr_in_range,
	.mmu_destroy_pagetable = nommu_destroy_pagetable,
};

static struct kgsl_pagetable *nommu_getpagetable(struct kgsl_mmu *mmu,
		unsigned long name)
{
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
	struct kgsl_pagetable *pagetable;
	struct kgsl_global_memdesc *md;

	pagetable = kgsl_get_pagetable(KGSL_MMU_GLOBAL_PT);
	if (pagetable == NULL) {
		pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
		if (!pagetable)
			return ERR_PTR(-ENOMEM);

		kgsl_mmu_pagetable_init(mmu, pagetable, KGSL_MMU_GLOBAL_PT);
		pagetable->pt_ops = &nommu_pt_ops;

		list_for_each_entry(md, &device->globals, node)
			md->memdesc.gpuaddr =
				(uint64_t) sg_phys(md->memdesc.sgt->sgl);

		kgsl_mmu_pagetable_add(mmu, pagetable);
	}

	return pagetable;
}

static struct kgsl_mmu_ops kgsl_nommu_ops = {
	.mmu_getpagetable = nommu_getpagetable,
};

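/*
 * iommu_map_sg() gained a gfp_t argument in newer kernels; hide the
 * signature difference behind a version check so callers don't need
 * their own #ifdefs.
 */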
ssize_t kgsl_mmu_map_sg(struct iommu_domain *domain,
		unsigned long iova, struct scatterlist *sg,
		unsigned int nents, int prot)
{
#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
	return iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
#else
	return iommu_map_sg(domain, iova, sg, nents, prot);
#endif
}

static int kgsl_mmu_cb_bind(struct device *dev, struct device *master, void *data)
{
	return 0;
}

static void kgsl_mmu_cb_unbind(struct device *dev, struct device *master,
		void *data)
{
}

static int kgsl_mmu_bind(struct device *dev, struct device *master, void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);
	struct kgsl_mmu *mmu = &device->mmu;
	int ret;

	/*
	 * Try to bind the IOMMU; if it isn't available for some reason,
	 * fall back to the NOMMU option instead
	 */
	ret = kgsl_iommu_bind(device, to_platform_device(dev));
	if (!ret || ret == -EPROBE_DEFER)
		return ret;

	mmu->mmu_ops = &kgsl_nommu_ops;
	mmu->type = KGSL_MMU_TYPE_NONE;

	return 0;
}

static void kgsl_mmu_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);
	struct kgsl_mmu *mmu = &device->mmu;

	if (MMU_OP_VALID(mmu, mmu_close))
		mmu->mmu_ops->mmu_close(mmu);
}

static const struct component_ops kgsl_mmu_cb_component_ops = {
	.bind = kgsl_mmu_cb_bind,
	.unbind = kgsl_mmu_cb_unbind,
};

static const struct component_ops kgsl_mmu_component_ops = {
	.bind = kgsl_mmu_bind,
	.unbind = kgsl_mmu_unbind,
};

static int kgsl_mmu_dev_probe(struct platform_device *pdev)
{
	/*
	 * Add kgsl-smmu and the context banks as component devices to
	 * establish the correct probe order with the SMMU driver.
	 *
	 * Because the context bank node in the DT contains an "iommus"
	 * property, fw_devlink ensures that the context bank is probed
	 * only after its supplier (the SMMU driver) has probed.
	 *
	 * Registering the context banks as component devices ensures the
	 * master bind (adreno_bind) is called only after all components
	 * (kgsl-smmu and the context banks) have probed, which in turn
	 * guarantees the correct probe order relative to the SMMU driver.
	 *
	 * kgsl-smmu also needs to be a component because the kgsl-smmu
	 * device info is needed to initialize the context banks.
	 */
	if (of_device_is_compatible(pdev->dev.of_node,
			"qcom,smmu-kgsl-cb"))
		return component_add(&pdev->dev, &kgsl_mmu_cb_component_ops);

	/* Fill out the rest of the devices in the node */
	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);

	return component_add(&pdev->dev, &kgsl_mmu_component_ops);
}

static int kgsl_mmu_dev_remove(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node,
			"qcom,smmu-kgsl-cb")) {
		component_del(&pdev->dev, &kgsl_mmu_cb_component_ops);
		return 0;
	}

	component_del(&pdev->dev, &kgsl_mmu_component_ops);

	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id mmu_match_table[] = {
	{ .compatible = "qcom,kgsl-smmu-v2" },
	{ .compatible = "qcom,smmu-kgsl-cb" },
	{},
};

static struct platform_driver kgsl_mmu_driver = {
	.probe = kgsl_mmu_dev_probe,
	.remove = kgsl_mmu_dev_remove,
	.driver = {
		.name = "kgsl-iommu",
		.of_match_table = mmu_match_table,
	}
};

int __init kgsl_mmu_init(void)
{
	return platform_driver_register(&kgsl_mmu_driver);
}

void kgsl_mmu_exit(void)
{
	platform_driver_unregister(&kgsl_mmu_driver);
}