sun50i-iommu.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
// Copyright (C) 2019-2020, Cerno

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define IOMMU_RESET_REG			0x010
#define IOMMU_RESET_RELEASE_ALL		0xffffffff
#define IOMMU_ENABLE_REG		0x020
#define IOMMU_ENABLE_ENABLE		BIT(0)

#define IOMMU_BYPASS_REG		0x030
#define IOMMU_AUTO_GATING_REG		0x040
#define IOMMU_AUTO_GATING_ENABLE	BIT(0)

#define IOMMU_WBUF_CTRL_REG		0x044
#define IOMMU_OOO_CTRL_REG		0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG	0x04c
#define IOMMU_TTB_REG			0x050
#define IOMMU_TLB_ENABLE_REG		0x060
#define IOMMU_TLB_PREFETCH_REG		0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)	BIT(m)
#define IOMMU_TLB_FLUSH_REG		0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE	BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB	BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i)	(BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG		0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG	0x094
#define IOMMU_TLB_IVLD_ENABLE_REG	0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE	BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG		0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG	0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE	BIT(0)

#define IOMMU_DM_AUT_CTRL_REG(d)	(0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)	(1 << ((((d) & 1) * 16) + ((m) * 2) + 1))

#define IOMMU_DM_AUT_OVWT_REG		0x0d0
#define IOMMU_INT_ENABLE_REG		0x100
#define IOMMU_INT_CLR_REG		0x104
#define IOMMU_INT_STA_REG		0x108
#define IOMMU_INT_ERR_ADDR_REG(i)	(0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG	0x130
#define IOMMU_INT_ERR_ADDR_L2_REG	0x134
#define IOMMU_INT_ERR_DATA_REG(i)	(0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG		0x0180
#define IOMMU_L2PG_INT_REG		0x0184

#define IOMMU_INT_INVALID_L2PG			BIT(17)
#define IOMMU_INT_INVALID_L1PG			BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m)		BIT(m)
#define IOMMU_INT_MASTER_MASK			(IOMMU_INT_MASTER_PERMISSION(0) | \
						 IOMMU_INT_MASTER_PERMISSION(1) | \
						 IOMMU_INT_MASTER_PERMISSION(2) | \
						 IOMMU_INT_MASTER_PERMISSION(3) | \
						 IOMMU_INT_MASTER_PERMISSION(4) | \
						 IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK				(IOMMU_INT_INVALID_L1PG | \
						 IOMMU_INT_INVALID_L2PG | \
						 IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE			sizeof(u32)
#define NUM_DT_ENTRIES			4096
#define DT_SIZE				(NUM_DT_ENTRIES * PT_ENTRY_SIZE)
#define NUM_PT_ENTRIES			256
#define PT_SIZE				(NUM_PT_ENTRIES * PT_ENTRY_SIZE)
#define SPAGE_SIZE			4096

struct sun50i_iommu {
	struct iommu_device iommu;

	/* Lock to modify the IOMMU registers */
	spinlock_t iommu_lock;

	struct device *dev;
	void __iomem *base;
	struct reset_control *reset;
	struct clk *clk;

	struct iommu_domain *domain;
	struct iommu_group *group;
	struct kmem_cache *pt_pool;
};

struct sun50i_iommu_domain {
	struct iommu_domain domain;

	/* Number of devices attached to the domain */
	refcount_t refcnt;

	/* L1 Page Table */
	u32 *dt;
	dma_addr_t dt_dma;

	struct sun50i_iommu *iommu;
};

static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct sun50i_iommu_domain, domain);
}

static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}

/*
 * The Allwinner H6 IOMMU uses a 2-level page table.
 *
 * The first level is the usual Directory Table (DT), that consists of
 * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
 * Table (PT).
 *
 * Each PT consists of 256 4-byte Page Table Entries (PTE), each
 * pointing to a 4kB page of physical memory.
 *
 * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
 * register that contains its physical address.
 */

#define SUN50I_IOVA_DTE_MASK	GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK	GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK	GENMASK(11, 0)

static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}
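
/*
 * Worked example (illustrative, not part of the original driver):
 * decomposing iova 0x12345678 with the helpers above gives
 *
 *	sun50i_iova_get_dte_index(0x12345678)   == 0x123 (bits 31:20)
 *	sun50i_iova_get_pte_index(0x12345678)   == 0x45  (bits 19:12)
 *	sun50i_iova_get_page_offset(0x12345678) == 0x678 (bits 11:0)
 *
 * i.e. DT entry 0x123 selects the PT, PT entry 0x45 selects the 4kB
 * page, and the low 12 bits are carried over unchanged.
 */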
/*
 * Each Directory Table Entry has a Page Table address and a valid
 * bit:
 *
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:10 - Page Table address
 *   9:2  - Reserved
 *   1:0  - 1 if the entry is valid
 */

#define SUN50I_DTE_PT_ADDRESS_MASK	GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS		GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID		1

static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

static bool sun50i_dte_is_pt_valid(u32 dte)
{
	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}
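
/*
 * Worked example (illustrative, not part of the original driver): a
 * PT allocated at DMA address 0x41234400 (PT_SIZE-, i.e. 1kB-,
 * aligned) yields
 *
 *	sun50i_mk_dte(0x41234400) == 0x41234401
 *
 * bits 31:10 carry the PT address and bit 0 marks the entry valid.
 */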
/*
 * Each PTE has a Page address, an authority index and a valid bit:
 *
 * +----------------+-----+-----+-----+---+-----+
 * | Page address   | Rsv | ACI | Rsv | V | Rsv |
 * +----------------+-----+-----+-----+---+-----+
 *  31:12 - Page address
 *  11:8  - Reserved
 *   7:4  - Authority Control Index
 *   3:2  - Reserved
 *     1  - 1 if the entry is valid
 *     0  - Reserved
 *
 * The way permissions work is that the IOMMU has 16 "domains" that
 * can be configured to give each master either read or write
 * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
 * seems to be the default domain, and its permissions in the
 * IOMMU_DM_AUT_CTRL_REG are only read-only, so it's not really
 * useful to enforce any particular permission.
 *
 * Each page entry will then have a reference to the domain it is
 * assigned to, so that we can actually enforce permissions on a
 * per-page basis.
 *
 * In order to make it work with the IOMMU framework, we will be using
 * 4 different domains, starting at 1: RD_WR, RD, WR and NONE,
 * depending on the permission we want to enforce. Each domain will
 * have each master set up in the same way, since the IOMMU framework
 * doesn't seem to restrict page access on a per-device basis. We then
 * use the relevant domain index when generating the page table entry,
 * depending on the permissions we want enforced.
 */

enum sun50i_iommu_aci {
	SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
	SUN50I_IOMMU_ACI_NONE,
	SUN50I_IOMMU_ACI_RD,
	SUN50I_IOMMU_ACI_WR,
	SUN50I_IOMMU_ACI_RD_WR,
};

#define SUN50I_PTE_PAGE_ADDRESS_MASK	GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK		GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID		BIT(1)

static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

static bool sun50i_pte_is_page_valid(u32 pte)
{
	return pte & SUN50I_PTE_PAGE_VALID;
}

static u32 sun50i_mk_pte(phys_addr_t page, int prot)
{
	enum sun50i_iommu_aci aci;
	u32 flags = 0;

	if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
		aci = SUN50I_IOMMU_ACI_RD_WR;
	else if (prot & IOMMU_READ)
		aci = SUN50I_IOMMU_ACI_RD;
	else if (prot & IOMMU_WRITE)
		aci = SUN50I_IOMMU_ACI_WR;
	else
		aci = SUN50I_IOMMU_ACI_NONE;

	flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
	page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
	return page | flags | SUN50I_PTE_PAGE_VALID;
}
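
/*
 * Worked example (illustrative, not part of the original driver):
 * mapping the 4kB page at physical address 0x42000000 read-only gives
 *
 *	sun50i_mk_pte(0x42000000, IOMMU_READ) == 0x42000022
 *
 * 0x42000000 for the page address (bits 31:12), 0x20 for
 * SUN50I_IOMMU_ACI_RD (2) in the ACI field (bits 7:4), and 0x2 for
 * the valid bit (bit 1).
 */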
static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}

static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
				  unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
	iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
		    IOMMU_TLB_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB invalidation timed out!\n");
}

static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
				       unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
		    IOMMU_PC_IVLD_ENABLE_ENABLE);

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000);
	if (ret)
		dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
}

static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
				   unsigned long iova, size_t size)
{
	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);

	sun50i_iommu_zap_iova(iommu, iova);
	sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
	if (size > SPAGE_SIZE) {
		sun50i_iommu_zap_iova(iommu, iova + size);
		sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
	}
	sun50i_iommu_zap_ptw_cache(iommu, iova);
	sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
	if (size > SZ_1M) {
		sun50i_iommu_zap_ptw_cache(iommu, iova + size);
		sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
}
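
/*
 * Editorial note (an assumption, not spelled out in the original
 * driver): sun50i_iommu_zap_range() invalidates one extra page past
 * each end of the range, and one extra 1MB-aligned PTW cache line,
 * presumably because the TLB prefetch enabled through
 * IOMMU_TLB_PREFETCH_REG can pull in translations adjacent to the
 * ones actually touched.
 */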
static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu,
		    IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg,
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}

static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * At boot, we'll have a first call into .flush_iotlb_all right after
	 * .probe_device, and since we link our (single) domain to our iommu in
	 * the .attach_device callback, we don't have that pointer set.
	 *
	 * It shouldn't really be any trouble to ignore it though since we flush
	 * all caches as part of the device powerup.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
					unsigned long iova, size_t size)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_zap_range(iommu, iova, size);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}

static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	if (!iommu->domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}

static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}

static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
					   gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	/* We rely on the physical address and DMA address being the same */
	WARN_ON(pt_dma != virt_to_phys(page_table));

	return page_table;
}

static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
					 u32 *page_table)
{
	phys_addr_t pt_phys = virt_to_phys(page_table);

	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
	kmem_cache_free(iommu->pt_pool, page_table);
}

static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);

		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;

		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}

static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 pte_index;
	u32 *page_table, *pte_addr;
	int ret = 0;

	page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
	if (IS_ERR(page_table)) {
		ret = PTR_ERR(page_table);
		goto out;
	}

	pte_index = sun50i_iova_get_pte_index(iova);
	pte_addr = &page_table[pte_index];
	if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
		phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);

		dev_err(iommu->dev,
			"iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
			&iova, &page_phys, &paddr, prot);
		ret = -EBUSY;
		goto out;
	}

	*pte_addr = sun50i_mk_pte(paddr, prot);
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

out:
	return ret;
}

static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, struct iommu_iotlb_gather *gather)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *pte_addr;
	u32 dte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

	if (!sun50i_pte_is_page_valid(*pte_addr))
		return 0;

	memset(pte_addr, 0, sizeof(*pte_addr));
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

	return SZ_4K;
}

static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *page_table;
	u32 dte, pte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);

	pte = page_table[sun50i_iova_get_pte_index(iova)];
	if (!sun50i_pte_is_page_valid(pte))
		return 0;

	return sun50i_pte_get_page_address(pte) +
	       sun50i_iova_get_page_offset(iova);
}

static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
{
	struct sun50i_iommu_domain *sun50i_domain;

	if (type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
	if (!sun50i_domain)
		return NULL;

	sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    get_order(DT_SIZE));
	if (!sun50i_domain->dt)
		goto err_free_domain;

	refcount_set(&sun50i_domain->refcnt, 1);

	sun50i_domain->domain.geometry.aperture_start = 0;
	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	sun50i_domain->domain.geometry.force_aperture = true;

	return &sun50i_domain->domain;

err_free_domain:
	kfree(sun50i_domain);

	return NULL;
}

static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	kfree(sun50i_domain);
}

static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
				      struct sun50i_iommu_domain *sun50i_domain)
{
	iommu->domain = &sun50i_domain->domain;
	sun50i_domain->iommu = iommu;

	sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
					       DT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
		dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
		return -ENOMEM;
	}

	return sun50i_iommu_enable(iommu);
}

static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}

	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}

static void sun50i_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);

	dev_dbg(dev, "Detaching from IOMMU domain\n");

	if (iommu->domain != domain)
		return;

	if (refcount_dec_and_test(&sun50i_domain->refcnt))
		sun50i_iommu_detach_domain(iommu, sun50i_domain);
}

static int sun50i_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return -ENODEV;

	dev_dbg(dev, "Attaching to IOMMU domain\n");

	refcount_inc(&sun50i_domain->refcnt);

	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		sun50i_iommu_detach_device(iommu->domain, dev);

	sun50i_iommu_attach_domain(iommu, sun50i_domain);

	return 0;
}

static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
	struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}

static int sun50i_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	unsigned id = args->args[0];

	dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));

	return iommu_fwspec_add_ids(dev, &id, 1);
}
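
/*
 * Illustrative device tree usage for the of_xlate callback above (a
 * sketch, assuming the single-cell "allwinner,sun50i-h6-iommu"
 * binding where the one cell is the master ID that ends up in
 * args->args[0]; node names and unit addresses here are made up):
 *
 *	iommu: iommu@30f0000 {
 *		compatible = "allwinner,sun50i-h6-iommu";
 *		...
 *		#iommu-cells = <1>;
 *	};
 *
 *	mixer0: mixer@1100000 {
 *		...
 *		iommus = <&iommu 0>;
 *	};
 */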
static const struct iommu_ops sun50i_iommu_ops = {
	.pgsize_bitmap	= SZ_4K,
	.device_group	= sun50i_iommu_device_group,
	.domain_alloc	= sun50i_iommu_domain_alloc,
	.of_xlate	= sun50i_iommu_of_xlate,
	.probe_device	= sun50i_iommu_probe_device,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= sun50i_iommu_attach_device,
		.detach_dev	= sun50i_iommu_detach_device,
		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
		.iotlb_sync_map	= sun50i_iommu_iotlb_sync_map,
		.iotlb_sync	= sun50i_iommu_iotlb_sync,
		.iova_to_phys	= sun50i_iommu_iova_to_phys,
		.map		= sun50i_iommu_map,
		.unmap		= sun50i_iommu_unmap,
		.free		= sun50i_iommu_domain_free,
	}
};

static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
				      unsigned master, phys_addr_t iova,
				      unsigned prot)
{
	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

	if (iommu->domain)
		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
	else
		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");

	sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
}

static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
					      unsigned addr_reg,
					      unsigned blame_reg)
{
	phys_addr_t iova;
	unsigned master;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	iova = iommu_read(iommu, addr_reg);
	blame = iommu_read(iommu, blame_reg);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

	return iova;
}

static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
	enum sun50i_iommu_aci aci;
	phys_addr_t iova;
	unsigned master;
	unsigned dir;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	blame = iommu_read(iommu, IOMMU_INT_STA_REG);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);
	iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
	aci = sun50i_get_pte_aci(iommu_read(iommu,
					    IOMMU_INT_ERR_DATA_REG(master)));

	switch (aci) {
	/*
	 * If we are in the read-only domain, then it means we
	 * tried to write.
	 */
	case SUN50I_IOMMU_ACI_RD:
		dir = IOMMU_FAULT_WRITE;
		break;

	/*
	 * If we are in the write-only domain, then it means
	 * we tried to read.
	 */
	case SUN50I_IOMMU_ACI_WR:

	/*
	 * If we are in the domain without any permission, we
	 * can't really tell. Let's default to a read
	 * operation.
	 */
	case SUN50I_IOMMU_ACI_NONE:

	/* WTF? */
	case SUN50I_IOMMU_ACI_RD_WR:
	default:
		dir = IOMMU_FAULT_READ;
		break;
	}

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, dir);

	return iova;
}

static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
	u32 status, l1_status, l2_status, resets;
	struct sun50i_iommu *iommu = dev_id;

	spin_lock(&iommu->iommu_lock);

	status = iommu_read(iommu, IOMMU_INT_STA_REG);
	if (!(status & IOMMU_INT_MASK)) {
		spin_unlock(&iommu->iommu_lock);
		return IRQ_NONE;
	}

	l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
	l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);

	if (status & IOMMU_INT_INVALID_L2PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L2_REG,
					   IOMMU_L2PG_INT_REG);
	else if (status & IOMMU_INT_INVALID_L1PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L1_REG,
					   IOMMU_L1PG_INT_REG);
	else
		sun50i_iommu_handle_perm_irq(iommu);

	iommu_write(iommu, IOMMU_INT_CLR_REG, status);

	resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
	iommu_write(iommu, IOMMU_RESET_REG, ~resets);
	iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);

	spin_unlock(&iommu->iommu_lock);

	return IRQ_HANDLED;
}

static int sun50i_iommu_probe(struct platform_device *pdev)
{
	struct sun50i_iommu *iommu;
	int ret, irq;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;
	spin_lock_init(&iommu->iommu_lock);
	platform_set_drvdata(pdev, iommu);
	iommu->dev = &pdev->dev;

	iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
					   PT_SIZE, PT_SIZE,
					   SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!iommu->pt_pool)
		return -ENOMEM;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		ret = PTR_ERR(iommu->group);
		goto err_free_cache;
	}

	iommu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iommu->base)) {
		ret = PTR_ERR(iommu->base);
		goto err_free_group;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_group;
	}

	iommu->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->clk)) {
		dev_err(&pdev->dev, "Couldn't get our clock.\n");
		ret = PTR_ERR(iommu->clk);
		goto err_free_group;
	}

	iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->reset)) {
		dev_err(&pdev->dev, "Couldn't get our reset line.\n");
		ret = PTR_ERR(iommu->reset);
		goto err_free_group;
	}

	ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
				     NULL, dev_name(&pdev->dev));
	if (ret)
		goto err_free_group;

	ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
	if (ret)
		goto err_remove_sysfs;

	ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
			       dev_name(&pdev->dev), iommu);
	if (ret < 0)
		goto err_unregister;

	return 0;

err_unregister:
	iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);

err_free_group:
	iommu_group_put(iommu->group);

err_free_cache:
	kmem_cache_destroy(iommu->pt_pool);

	return ret;
}

static const struct of_device_id sun50i_iommu_dt[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
	.driver		= {
		.name			= "sun50i-iommu",
		.of_match_table		= sun50i_iommu_dt,
		.suppress_bind_attrs	= true,
	}
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_AUTHOR("zhuxianbin <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");