// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <[email protected]>
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <linux/platform_device.h>
#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
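
/*
 * Allocate the kmem caches used by this driver: one for the
 * fsl_dma_domain objects and one for the per-device bookkeeping
 * (struct device_domain_info). Objects in both caches are
 * cache-line aligned (SLAB_HWCACHE_ALIGN).
 */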
static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}
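
/*
 * Update the stash destination in the (S)PAACE entry for one LIODN.
 * Serialized against other PAMU table updates by iommu_lock.
 */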
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_update_paace_stash(liodn, val);
	if (ret) {
		pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return ret;
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
			  int liodn)
{
	u32 omi_index = ~(u32)0;
	unsigned long flags;
	int ret;

	/*
	 * Configure the omi_index at geometry setup time.
	 * This is a static value which depends on the type of
	 * device and does not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
	if (ret)
		goto out_unlock;
	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
out_unlock:
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d\n",
			 liodn);
	}

	return ret;
}
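
/*
 * Drop one device/LIODN reference from a domain: unlink it from the
 * domain's device list, disable the LIODN in the PAMU, and free the
 * per-device info. Called with the owning domain's domain_lock held.
 */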
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
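
/*
 * Remove devices from a domain's device list. A NULL @dev removes
 * every device attached to @dma_domain; otherwise only @dev itself
 * is removed.
 */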
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
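
/*
 * Record that @dev (with the given LIODN) is attached to @dma_domain.
 * A device already attached to a different domain is detached from it
 * first. For multi-LIODN devices this runs once per LIODN, but only
 * the first call stores the info pointer in the device.
 */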
static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check if the device is already attached to a domain. If it
	 * is attached to a different domain, detach it from that
	 * domain first.
	 */
	old_domain_info = dev_iommu_priv_get(dev);
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In the case of devices with multiple LIODNs, just store
	 * the info for the first LIODN, as all LIODNs share the
	 * same domain.
	 */
	if (!dev_iommu_priv_get(dev))
		dev_iommu_priv_set(dev, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
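
/*
 * PAMU provides a 1:1 (identity) translation inside the domain
 * aperture, so the physical address for a valid IOVA is the IOVA
 * itself.
 */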
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	return iova;
}

static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
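
/*
 * Only unmanaged domains are supported. A new domain starts with no
 * stash target (stash_id == ~0) and an aperture spanning the full
 * 36-bit physical address space.
 */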
static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!dma_domain)
		return NULL;

	dma_domain->stash_id = ~(u32)0;
	INIT_LIST_HEAD(&dma_domain->devices);
	spin_lock_init(&dma_domain->domain_lock);

	/* default geometry is 64 GB, i.e. the maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}
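
/*
 * The LIODNs consumed below come from the "fsl,liodn" device tree
 * property, which U-Boot fixes up at boot time. A node carrying the
 * property looks roughly like this (node name, compatible and value
 * are only illustrative, not taken from this file):
 *
 *	dma@100300 {
 *		compatible = "fsl,eloplus-dma";
 *		fsl,liodn = <197>;
 *	};
 *
 * A device may list several LIODNs; each one is validated, attached
 * and enabled in turn by fsl_pamu_attach_device() below.
 */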
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int len, ret = 0, i;
	const u32 *liodn;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (!liodn) {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < len / sizeof(u32); i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
		if (ret)
			break;
		ret = pamu_enable_liodn(liodn[i]);
		if (ret)
			break;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by U-Boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}

/* Set the domain stash attribute */
int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
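
/*
 * Rough usage sketch for a client of the stash interface (the caller
 * shown here is illustrative, not taken from this file):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, dev))
 *		// direct stashing of DMA traffic at CPU 0's L1 cache
 *		fsl_pamu_configure_l1_stash(dom, 0);
 *
 * On success, PAMU transactions for all LIODNs attached to the domain
 * are stashed into the L1 cache of the given CPU.
 */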
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If the PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices, so assign a device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity;
		 * free the controller device's iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * linked list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}
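
/*
 * Every device handled by this driver shares the single driver-wide
 * pamu_iommu handle registered in pamu_domain_init() below.
 */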
static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
	return &pamu_iommu;
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.probe_device	= fsl_pamu_probe_device,
	.device_group	= fsl_pamu_device_group,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= fsl_pamu_attach_device,
		.detach_dev	= fsl_pamu_detach_device,
		.iova_to_phys	= fsl_pamu_iova_to_phys,
		.free		= fsl_pamu_domain_free,
	}
};
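
/*
 * One-time setup, called during PAMU initialization: allocates the
 * kmem caches and registers the PAMU with the IOMMU core, exposing
 * it in sysfs as "iommu0".
 */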
int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
	if (ret)
		return ret;

	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
	if (ret) {
		iommu_device_sysfs_remove(&pamu_iommu);
		pr_err("Can't register iommu device\n");
	}

	return ret;
}