msm_iommu.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)                         \
__asm__ __volatile__ (                                                  \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"        \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES       (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
        struct list_head list_attached;
        struct iommu_domain domain;
        struct io_pgtable_cfg cfg;
        struct io_pgtable_ops *iop;
        struct device *dev;
        spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
        return container_of(dom, struct msm_priv, domain);
}
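
/*
 * Clock handling: the interface clock (pclk) must be running for any
 * register access, and the optional core clock (clk) is enabled on top
 * of it. Both helpers assume the clocks were already prepared in probe,
 * so only clk_enable()/clk_disable() are used here, which are safe in
 * atomic context (unlike clk_prepare()/clk_unprepare()).
 */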
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}

static void msm_iommu_reset(void __iomem *base, int ncb)
{
        int ctx;

        SET_RPUE(base, 0);
        SET_RPUEIE(base, 0);
        SET_ESRRESTORE(base, 0);
        SET_TBE(base, 0);
        SET_CR(base, 0);
        SET_SPDMBE(base, 0);
        SET_TESTBUSCR(base, 0);
        SET_TLBRSW(base, 0);
        SET_GLOBAL_TLBIALL(base, 0);
        SET_RPU_ACR(base, 0);
        SET_TLBLKCRWE(base, 1);

        for (ctx = 0; ctx < ncb; ctx++) {
                SET_BPRCOSH(base, ctx, 0);
                SET_BPRCISH(base, ctx, 0);
                SET_BPRCNSH(base, ctx, 0);
                SET_BPSHCFG(base, ctx, 0);
                SET_BPMTCFG(base, ctx, 0);
                SET_ACTLR(base, ctx, 0);
                SET_SCTLR(base, ctx, 0);
                SET_FSRRESTORE(base, ctx, 0);
                SET_TTBR0(base, ctx, 0);
                SET_TTBR1(base, ctx, 0);
                SET_TTBCR(base, ctx, 0);
                SET_BFBCR(base, ctx, 0);
                SET_PAR(base, ctx, 0);
                SET_FAR(base, ctx, 0);
                SET_CTX_TLBIALL(base, ctx, 0);
                SET_TLBFLPTER(base, ctx, 0);
                SET_TLBSLPTER(base, ctx, 0);
                SET_TLBLKCR(base, ctx, 0);
                SET_CONTEXTIDR(base, ctx, 0);
        }
}
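
/*
 * Flush the entire TLB of every IOMMU attached to this domain. This is
 * the io-pgtable tlb_flush_all callback; the cookie is the msm_priv
 * that was handed to alloc_io_pgtable_ops().
 */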
static void __flush_iotlb(void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
fail:
        return;
}
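
/*
 * Invalidate a range of the TLB one granule at a time for every context
 * bank attached to the domain. Each TLBIVA write combines the page
 * address (masked by TLBIVA_VA) with the context's ASID read back from
 * CONTEXTIDR, so only entries tagged for that context are dropped. The
 * "leaf" argument is accepted for io-pgtable API symmetry but not used.
 */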
static void __flush_iotlb_range(unsigned long iova, size_t size,
                                size_t granule, bool leaf, void *cookie)
{
        struct msm_priv *priv = cookie;
        struct msm_iommu_dev *iommu = NULL;
        struct msm_iommu_ctx_dev *master;
        int ret = 0;
        int temp_size;

        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        temp_size = size;
                        do {
                                iova &= TLBIVA_VA;
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                iova += granule;
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }

fail:
        return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
                               size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t granule, void *cookie)
{
        __flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
        .tlb_flush_all = __flush_iotlb,
        .tlb_flush_walk = __flush_iotlb_walk,
        .tlb_add_page = __flush_iotlb_page,
};
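
/*
 * Context bank allocation: context_map is a bitmap with one bit per
 * context bank. find_next_zero_bit() plus test_and_set_bit() gives a
 * simple race-free allocator; -ENOSPC is returned once every bank in
 * the [start, end) range is in use.
 */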
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);

                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Set the context number for that MID to this context */
                SET_CBNDX(iommu->base, mid, ctx);

                /* Set MID associated with this context bank to 0 */
                SET_CBVMID(iommu->base, ctx, 0);

                /* Set the ASID for TLB tagging for this context */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}

static void __reset_context(void __iomem *base, int ctx)
{
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx,
                              struct msm_priv *priv)
{
        __reset_context(base, ctx);

        /* Turn on TEX Remap */
        SET_TRE(base, ctx, 1);
        SET_AFE(base, ctx, 1);

        /* Set up HTW mode */
        /* TLB miss configuration: perform HTW on miss */
        SET_TLBMCFG(base, ctx, 0x3);

        /* V2P configuration: HTW for access */
        SET_V2PCFG(base, ctx, 0x3);

        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
        SET_TTBR1(base, ctx, 0);

        /* Set prrr and nmrr */
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

        /* Invalidate the TLB for this context */
        SET_CTX_TLBIALL(base, ctx, 0);

        /* Set interrupt number to "secure" interrupt */
        SET_IRPTNDX(base, ctx, 0);

        /* Enable context fault interrupt */
        SET_CFEIE(base, ctx, 1);

        /* Stall access on a context fault and let the handler deal with it */
        SET_CFCFG(base, ctx, 1);

        /* Redirect all cacheable requests to L2 slave port. */
        SET_RCISH(base, ctx, 1);
        SET_RCOSH(base, ctx, 1);
        SET_RCNSH(base, ctx, 1);

        /* Turn on BFB prefetch */
        SET_BFBDFE(base, ctx, 1);

        /* Enable the MMU */
        SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
        struct msm_priv *priv;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;

        INIT_LIST_HEAD(&priv->list_attached);

        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;

fail_nomem:
        kfree(priv);
        return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
        struct msm_priv *priv;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        priv = to_msm_priv(domain);
        kfree(priv);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
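
/*
 * Set up the io-pgtable backing this domain: a 32-bit input/output
 * address space using the ARMv7 short-descriptor format, with TLB
 * maintenance routed back to this driver through msm_iommu_flush_ops.
 * The page size bitmap advertised by the ops is refreshed from whatever
 * the pgtable code actually accepted.
 */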
static int msm_iommu_domain_config(struct msm_priv *priv)
{
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &msm_iommu_flush_ops,
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

        return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
        struct msm_iommu_dev *iommu, *ret = NULL;
        struct msm_iommu_ctx_dev *master;

        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }

        return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
        struct msm_iommu_dev *iommu;
        unsigned long flags;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        iommu = find_iommu_for_dev(dev);
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        if (!iommu)
                return ERR_PTR(-ENODEV);

        return &iommu->iommu;
}
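
/*
 * Attaching a device: find the IOMMU instance whose first master matches
 * the device's of_node, then, with clocks enabled, allocate a context
 * bank for each master, program its stream IDs (MIDs) and page table
 * registers, and record the IOMMU on the domain's list_attached so that
 * later TLB flushes reach it.
 */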
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        int ret = 0;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_priv *priv = to_msm_priv(domain);
        struct msm_iommu_ctx_dev *master;

        priv->dev = dev;
        msm_iommu_domain_config(priv);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev,
                                          list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        dev_err(dev, "domain already attached");
                                        ret = -EEXIST;
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }

fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        int ret;

        free_io_pgtable_ops(priv->iop);

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return ret;
}

static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct msm_priv *priv = to_msm_priv(domain);

        __flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t len, struct iommu_iotlb_gather *gather)
{
        struct msm_priv *priv = to_msm_priv(domain);
        unsigned long flags;

        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len, gather);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

        return len;
}
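
/*
 * Translate an IOVA by asking the hardware: write the address to the
 * V2P (VA-to-PA) request register of the first attached context and
 * read the result back from PAR. Supersection results keep 24 bits of
 * offset from the VA, ordinary results keep 12; a reported fault makes
 * the lookup return 0.
 */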
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
        struct msm_priv *priv;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
        unsigned int par;
        unsigned long flags;
        phys_addr_t ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);

        priv = to_msm_priv(domain);
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        if (!master)
                goto fail;

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate context TLB */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);

        /* We are dealing with a supersection */
        if (GET_NOFAULT_SS(iommu->base, master->num))
                ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
        else    /* Upper 20 bits from PAR, lower 12 from VA */
                ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

        if (GET_FAULT(iommu->base, master->num))
                ret = 0;

        __disable_clocks(iommu);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
        return ret;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
        unsigned int fsr = GET_FSR(base, ctx);

        pr_err("FAR = %08x PAR = %08x\n",
               GET_FAR(base, ctx), GET_PAR(base, ctx));
        pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
               (fsr & 0x02) ? "TF " : "",
               (fsr & 0x04) ? "AFF " : "",
               (fsr & 0x08) ? "APF " : "",
               (fsr & 0x10) ? "TLBMF " : "",
               (fsr & 0x20) ? "HTWDEEF " : "",
               (fsr & 0x40) ? "HTWSEEF " : "",
               (fsr & 0x80) ? "MHF " : "",
               (fsr & 0x10000) ? "SL " : "",
               (fsr & 0x40000000) ? "SS " : "",
               (fsr & 0x80000000) ? "MULTI " : "");
        pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
               GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
        pr_err("TTBR0 = %08x TTBR1 = %08x\n",
               GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
        pr_err("SCTLR = %08x ACTLR = %08x\n",
               GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
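
/*
 * of_xlate support: each "iommus" phandle argument is a stream ID (MID).
 * The first reference from a device allocates its msm_iommu_ctx_dev and
 * hooks it onto the IOMMU's ctx_list; subsequent references only append
 * the stream ID, with duplicates warned about and ignored.
 */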
static int insert_iommu_master(struct device *dev,
                               struct msm_iommu_dev **iommu,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
        int sid;

        if (list_empty(&(*iommu)->ctx_list)) {
                master = kzalloc(sizeof(*master), GFP_ATOMIC);
                if (!master) {
                        dev_err(dev, "Failed to allocate iommu_master\n");
                        return -ENOMEM;
                }
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
                dev_iommu_priv_set(dev, master);
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
                                 sid);
                        return 0;
                }

        master->mids[master->num_mids++] = spec->args[0];
        return 0;
}

static int qcom_iommu_of_xlate(struct device *dev,
                               struct of_phandle_args *spec)
{
        struct msm_iommu_dev *iommu = NULL, *iter;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&msm_iommu_lock, flags);
        list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
                if (iter->dev->of_node == spec->np) {
                        iommu = iter;
                        break;
                }
        }

        if (!iommu) {
                ret = -ENODEV;
                goto fail;
        }

        ret = insert_iommu_master(dev, &iommu, spec);
fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);

        return ret;
}
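
/*
 * Context fault interrupt: scan every context bank, dump the fault
 * registers for any bank with a non-zero FSR, then write FSR to
 * acknowledge it (0x4000000F appears to be the write-to-clear mask for
 * the status bits on this hardware). Faults are only reported here; no
 * retry or abort policy is applied on behalf of the client device.
 */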
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
        struct msm_iommu_dev *iommu = dev_id;
        unsigned int fsr;
        int i, ret;

        spin_lock(&msm_iommu_lock);

        if (!iommu) {
                pr_err("Invalid device ID in context interrupt handler\n");
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        pr_err("Fault occurred in context %d.\n", i);
                        pr_err("Interesting registers:\n");
                        print_ctx_regs(iommu->base, i);
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
fail:
        spin_unlock(&msm_iommu_lock);
        return 0;
}

static struct iommu_ops msm_iommu_ops = {
        .domain_alloc = msm_iommu_domain_alloc,
        .probe_device = msm_iommu_probe_device,
        .device_group = generic_device_group,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev = msm_iommu_attach_dev,
                .detach_dev = msm_iommu_detach_dev,
                .map = msm_iommu_map,
                .unmap = msm_iommu_unmap,
                /*
                 * Nothing is needed here, the barrier to guarantee
                 * completion of the tlb sync operation is implicitly
                 * taken care when the iommu client does a writel before
                 * kick starting the other master.
                 */
                .iotlb_sync = NULL,
                .iotlb_sync_map = msm_iommu_sync_map,
                .iova_to_phys = msm_iommu_iova_to_phys,
                .free = msm_iommu_domain_free,
        }
};
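
/*
 * Probe: map the registers, prepare the clocks, read the number of
 * context banks from the "qcom,ncb" DT property, reset the IOMMU, and
 * sanity-check it with one dummy V2P translation (a zero PAR is treated
 * as an invalid result), then wire up the fault IRQ and register with
 * the IOMMU core.
 */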
static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENODEV;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk))
                return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
                                     "could not get smmu_pclk\n");

        ret = clk_prepare(iommu->pclk);
        if (ret)
                return dev_err_probe(iommu->dev, ret,
                                     "could not prepare smmu_pclk\n");

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                clk_unprepare(iommu->pclk);
                return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
                                     "could not get iommu_clk\n");
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                clk_unprepare(iommu->pclk);
                return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                pr_err("Invalid PAR value detected\n");
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        list_add(&iommu->dev_node, &qcom_iommu_devices);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }

        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;
fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return 0;
}

static struct platform_driver msm_iommu_driver = {
        .driver = {
                .name = "msm_iommu",
                .of_match_table = msm_iommu_dt_match,
        },
        .probe = msm_iommu_probe,
        .remove = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);