qcom-iommu-util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Portions based off of __alloc_and_insert_iova_range() implementation
 * in drivers/iommu/iova.c:
 * Author: Anil S Keshavamurthy <[email protected]>
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-mapping-fast.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/qcom-iommu-util.h>
#include <linux/iova.h>
#include <linux/qcom-io-pgtable.h>
#include <trace/hooks/iommu.h>
#include "qcom-dma-iommu-generic.h"
#include "qcom-io-pgtable-alloc.h"

struct qcom_iommu_range_prop_cb_data {
	int (*range_prop_entry_cb_fn)(const __be32 *p, int naddr, int nsize, void *arg);
	void *arg;
};

struct iova_range {
	u64 base;
	u64 end;
};

struct device_node *qcom_iommu_group_parse_phandle(struct device *dev)
{
	struct device_node *np;

	if (!dev->of_node)
		return NULL;

	np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
	return np ? np : dev->of_node;
}

static int of_property_walk_each_entry(struct device *dev, const char *propname,
				       struct qcom_iommu_range_prop_cb_data *cb_data)
{
	struct device_node *np;
	const __be32 *p, *property_end;
	int ret, len, naddr, nsize;

	np = qcom_iommu_group_parse_phandle(dev);
	if (!np)
		return -EINVAL;

	p = of_get_property(np, propname, &len);
	if (!p)
		return -ENODEV;

	len /= sizeof(u32);
	naddr = of_n_addr_cells(np);
	nsize = of_n_size_cells(np);
	if (!naddr || !nsize || len % (naddr + nsize)) {
		dev_err(dev, "%s Invalid length %d. Address cells %d. Size cells %d\n",
			propname, len, naddr, nsize);
		return -EINVAL;
	}

	property_end = p + len;
	while (p < property_end) {
		ret = cb_data->range_prop_entry_cb_fn(p, naddr, nsize, cb_data->arg);
		if (ret)
			return ret;

		p += naddr + nsize;
	}

	return 0;
}

static bool check_overlap(struct iommu_resv_region *region, u64 start, u64 end)
{
	u64 region_end = region->start + region->length - 1;

	return end >= region->start && start <= region_end;
}

static int insert_range(const __be32 *p, int naddr, int nsize, void *arg)
{
	struct list_head *head = arg;
	struct iommu_resv_region *region, *new;
	u64 start = of_read_number(p, naddr);
	u64 end = start + of_read_number(p + naddr, nsize) - 1;

	list_for_each_entry(region, head, list) {
		if (check_overlap(region, start, end))
			return -EINVAL;

		if (start < region->start)
			break;
	}

	new = iommu_alloc_resv_region(start, end - start + 1,
				      0, IOMMU_RESV_RESERVED, GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	list_add_tail(&new->list, &region->list);
	return 0;
}

/*
 * Returns a sorted list of all regions described by the
 * "qcom,iommu-dma-addr-pool" property.
 *
 * Caller is responsible for freeing the entries on the list via
 * iommu_put_resv_regions
 */
int qcom_iommu_generate_dma_regions(struct device *dev,
				    struct list_head *head)
{
	struct qcom_iommu_range_prop_cb_data insert_range_cb_data = {
		.range_prop_entry_cb_fn = insert_range,
		.arg = head,
	};

	return of_property_walk_each_entry(dev, "qcom,iommu-dma-addr-pool",
					   &insert_range_cb_data);
}
EXPORT_SYMBOL(qcom_iommu_generate_dma_regions);
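
/*
 * Illustrative usage sketch (not part of the original file): a client
 * driver could build the DMA pool list for its device and then release
 * the entries itself, since the helper above leaves ownership of the
 * list with the caller. The example_* name below is hypothetical.
 */
#if 0
static void example_show_dma_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(dma_regions);

	if (qcom_iommu_generate_dma_regions(dev, &dma_regions))
		return;

	list_for_each_entry(region, &dma_regions, list)
		dev_info(dev, "DMA region %pa len %zx\n",
			 &region->start, region->length);

	/* The caller owns the list entries and must free them. */
	iommu_put_resv_regions(dev, &dma_regions);
}
#endif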

static int invert_regions(struct list_head *head, struct list_head *inverted)
{
	struct iommu_resv_region *prev, *curr, *new;
	phys_addr_t rsv_start;
	size_t rsv_size;
	int ret = 0;

	/*
	 * Since it's not possible to express start 0, size 1 << 64, return
	 * an error instead. Also, an iova allocator without any iovas doesn't
	 * make sense.
	 */
	if (list_empty(head))
		return -EINVAL;

	/*
	 * Handle the case where there is a non-zero sized area between
	 * iommu_resv_regions A & B.
	 */
	prev = NULL;
	list_for_each_entry(curr, head, list) {
		if (!prev)
			goto next;

		rsv_start = prev->start + prev->length;
		rsv_size = curr->start - rsv_start;
		if (!rsv_size)
			goto next;

		new = iommu_alloc_resv_region(rsv_start, rsv_size,
					      0, IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out_err;
		}
		list_add_tail(&new->list, inverted);
next:
		prev = curr;
	}

	/* Now handle the beginning */
	curr = list_first_entry(head, struct iommu_resv_region, list);
	rsv_start = 0;
	rsv_size = curr->start;
	if (rsv_size) {
		new = iommu_alloc_resv_region(rsv_start, rsv_size,
					      0, IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out_err;
		}
		list_add(&new->list, inverted);
	}

	/* Handle the end - checking for overflow */
	rsv_start = prev->start + prev->length;
	rsv_size = -rsv_start;
	if (rsv_size && (U64_MAX - prev->start > prev->length)) {
		new = iommu_alloc_resv_region(rsv_start, rsv_size,
					      0, IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out_err;
		}
		list_add_tail(&new->list, inverted);
	}

	return 0;

out_err:
	list_for_each_entry_safe(curr, prev, inverted, list)
		kfree(curr);
	return ret;
}

/* Used by iommu drivers to generate reserved regions for the qcom,iommu-dma-addr-pool property */
void qcom_iommu_generate_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	LIST_HEAD(dma_regions);
	LIST_HEAD(resv_regions);
	int ret;

	ret = qcom_iommu_generate_dma_regions(dev, &dma_regions);
	if (ret)
		return;

	ret = invert_regions(&dma_regions, &resv_regions);
	iommu_put_resv_regions(dev, &dma_regions);
	if (ret)
		return;

	list_for_each_entry(region, &resv_regions, list) {
		dev_dbg(dev, "Reserved region %llx-%llx\n",
			(u64)region->start,
			(u64)(region->start + region->length - 1));
	}

	list_splice(&resv_regions, head);
}
EXPORT_SYMBOL(qcom_iommu_generate_resv_regions);
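
/*
 * Illustrative sketch (not part of the original file): an IOMMU driver
 * would typically call the helper above from its own get_resv_regions()
 * callback so that everything outside the "qcom,iommu-dma-addr-pool"
 * ranges is reported as reserved. The example_* names are hypothetical.
 */
#if 0
static void example_driver_get_resv_regions(struct device *dev,
					    struct list_head *head)
{
	/* Report the inverted DT DMA address pool as reserved regions. */
	qcom_iommu_generate_resv_regions(dev, head);
}

static const struct iommu_ops example_driver_iommu_ops = {
	.get_resv_regions = example_driver_get_resv_regions,
	/* remaining callbacks omitted */
};
#endif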

void qcom_iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}
EXPORT_SYMBOL(qcom_iommu_get_resv_regions);

static int get_addr_range(const __be32 *p, int naddr, int nsize, void *arg)
{
	u64 start = of_read_number(p, naddr);
	u64 end = start + of_read_number(p + naddr, nsize) - 1;
	struct iova_range *range = arg;

	if (start >= SZ_4G || end >= SZ_4G) {
		pr_err("fastmap does not support IOVAs >= 4 GB\n");
		return -EINVAL;
	}

	range->base = min_not_zero(range->base, start);
	range->end = max(range->end, end);
	return 0;
}

int qcom_iommu_get_fast_iova_range(struct device *dev, dma_addr_t *ret_iova_base,
				   dma_addr_t *ret_iova_end)
{
	struct iova_range dma_range = {};
	struct iova_range geometry_range = {};
	struct qcom_iommu_range_prop_cb_data get_addr_range_cb_data = {
		.range_prop_entry_cb_fn = get_addr_range,
	};
	int ret;

	if (!dev || !ret_iova_base || !ret_iova_end)
		return -EINVAL;

	get_addr_range_cb_data.arg = &dma_range;
	ret = of_property_walk_each_entry(dev, "qcom,iommu-dma-addr-pool",
					  &get_addr_range_cb_data);
	if (ret == -ENODEV) {
		dma_range.base = 0;
		dma_range.end = SZ_4G - 1;
	} else if (ret) {
		return ret;
	}

	get_addr_range_cb_data.arg = &geometry_range;
	ret = of_property_walk_each_entry(dev, "qcom,iommu-geometry",
					  &get_addr_range_cb_data);
	if (ret == -ENODEV) {
		geometry_range.base = 0;
		geometry_range.end = SZ_4G - 1;
	} else if (ret) {
		return ret;
	}

	*ret_iova_base = min(geometry_range.base, dma_range.base);
	*ret_iova_end = max(geometry_range.end, dma_range.end);
	return 0;
}
EXPORT_SYMBOL(qcom_iommu_get_fast_iova_range);
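
/*
 * Illustrative sketch (not part of the original file): a fastmap client
 * could query the merged DT window before setting up its IOVA space.
 * The example_* name is hypothetical.
 */
#if 0
static int example_probe_iova_window(struct device *dev)
{
	dma_addr_t base, end;
	int ret;

	ret = qcom_iommu_get_fast_iova_range(dev, &base, &end);
	if (ret)
		return ret;

	dev_info(dev, "fast IOVA window: %pad - %pad\n", &base, &end);
	return 0;
}
#endif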

phys_addr_t qcom_iommu_iova_to_phys_hard(struct iommu_domain *domain,
					 struct qcom_iommu_atos_txn *txn)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->iova_to_phys_hard == NULL))
		return 0;

	return ops->iova_to_phys_hard(domain, txn);
}
EXPORT_SYMBOL(qcom_iommu_iova_to_phys_hard);

int qcom_iommu_sid_switch(struct device *dev, enum sid_switch_direction dir)
{
	struct qcom_iommu_ops *ops;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return -EINVAL;

	ops = to_qcom_iommu_ops(domain->ops);
	if (unlikely(ops->sid_switch == NULL))
		return -EINVAL;

	return ops->sid_switch(dev, dir);
}
EXPORT_SYMBOL(qcom_iommu_sid_switch);

int qcom_iommu_get_fault_ids(struct iommu_domain *domain,
			     struct qcom_iommu_fault_ids *f_ids)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->get_fault_ids == NULL))
		return -EINVAL;

	return ops->get_fault_ids(domain, f_ids);
}
EXPORT_SYMBOL(qcom_iommu_get_fault_ids);

int qcom_skip_tlb_management(struct device *dev, bool skip)
{
	struct qcom_iommu_ops *ops;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return -EINVAL;

	ops = to_qcom_iommu_ops(domain->ops);
	if (unlikely(ops->skip_tlb_management == NULL))
		return -EINVAL;

	ops->skip_tlb_management(domain, skip);
	return 0;
}
EXPORT_SYMBOL(qcom_skip_tlb_management);

int qcom_iommu_get_msi_size(struct device *dev, u32 *msi_size)
{
	struct device_node *np = qcom_iommu_group_parse_phandle(dev);

	if (!np)
		return -EINVAL;

	return of_property_read_u32(np, "qcom,iommu-msi-size", msi_size);
}

int qcom_iommu_get_context_bank_nr(struct iommu_domain *domain)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->get_context_bank_nr == NULL))
		return -EINVAL;

	return ops->get_context_bank_nr(domain);
}
EXPORT_SYMBOL(qcom_iommu_get_context_bank_nr);

int qcom_iommu_get_asid_nr(struct iommu_domain *domain)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->get_asid_nr == NULL))
		return -EINVAL;

	return ops->get_asid_nr(domain);
}
EXPORT_SYMBOL(qcom_iommu_get_asid_nr);

int qcom_iommu_set_secure_vmid(struct iommu_domain *domain, enum vmid vmid)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->set_secure_vmid == NULL))
		return -EINVAL;

	return ops->set_secure_vmid(domain, vmid);
}
EXPORT_SYMBOL(qcom_iommu_set_secure_vmid);

int qcom_iommu_set_fault_model(struct iommu_domain *domain, int fault_model)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->set_fault_model == NULL))
		return -EINVAL;
	else if (fault_model & ~(QCOM_IOMMU_FAULT_MODEL_NON_FATAL |
				 QCOM_IOMMU_FAULT_MODEL_NO_CFRE |
				 QCOM_IOMMU_FAULT_MODEL_NO_STALL |
				 QCOM_IOMMU_FAULT_MODEL_HUPCF))
		return -EINVAL;

	return ops->set_fault_model(domain, fault_model);
}
EXPORT_SYMBOL(qcom_iommu_set_fault_model);

/*
 * Sets the client function which, when registered, is called from the
 * non-threaded irq fault handler.
 */
int qcom_iommu_set_fault_handler_irq(struct iommu_domain *domain,
				     fault_handler_irq_t handler_irq, void *token)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->set_fault_handler_irq == NULL))
		return -EINVAL;

	ops->set_fault_handler_irq(domain, handler_irq, token);
	return 0;
}
EXPORT_SYMBOL(qcom_iommu_set_fault_handler_irq);

int qcom_iommu_enable_s1_translation(struct iommu_domain *domain)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->enable_s1_translation == NULL))
		return -EINVAL;

	return ops->enable_s1_translation(domain);
}
EXPORT_SYMBOL(qcom_iommu_enable_s1_translation);

int qcom_iommu_get_mappings_configuration(struct iommu_domain *domain)
{
	struct qcom_iommu_ops *ops = to_qcom_iommu_ops(domain->ops);

	if (unlikely(ops->get_mappings_configuration == NULL))
		return -EINVAL;

	return ops->get_mappings_configuration(domain);
}
EXPORT_SYMBOL(qcom_iommu_get_mappings_configuration);

struct io_pgtable_ops *qcom_alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
						 struct qcom_io_pgtable_info *pgtbl_info,
						 void *cookie)
{
	struct io_pgtable *iop;
	const struct io_pgtable_init_fns *fns;
	struct io_pgtable_cfg *cfg = &pgtbl_info->cfg;

	if (fmt < IO_PGTABLE_NUM_FMTS)
		return alloc_io_pgtable_ops(fmt, cfg, cookie);
#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
	else if (fmt == ARM_V8L_FAST)
		fns = &io_pgtable_av8l_fast_init_fns;
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
	else if (fmt == QCOM_ARM_64_LPAE_S1)
		fns = &qcom_io_pgtable_arm_64_lpae_s1_init_fns;
#endif
	else {
		pr_err("Invalid io-pgtable fmt %u\n", fmt);
		return NULL;
	}

	iop = fns->alloc(cfg, cookie);
	if (!iop)
		return NULL;

	iop->fmt = fmt;
	iop->cookie = cookie;
	iop->cfg = *cfg;
	return &iop->ops;
}
EXPORT_SYMBOL(qcom_alloc_io_pgtable_ops);

void qcom_free_io_pgtable_ops(struct io_pgtable_ops *ops)
{
	struct io_pgtable *iop;
	enum io_pgtable_fmt fmt;
	const struct io_pgtable_init_fns *fns;

	if (!ops)
		return;

	iop = io_pgtable_ops_to_pgtable(ops);
	fmt = iop->fmt;
	if (fmt < IO_PGTABLE_NUM_FMTS)
		return free_io_pgtable_ops(ops);
#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
	else if (fmt == ARM_V8L_FAST)
		fns = &io_pgtable_av8l_fast_init_fns;
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
	else if (fmt == QCOM_ARM_64_LPAE_S1)
		fns = &qcom_io_pgtable_arm_64_lpae_s1_init_fns;
#endif
	else {
		pr_err("Invalid io-pgtable fmt %u\n", fmt);
		return;
	}

	io_pgtable_tlb_flush_all(iop);
	fns->free(iop);
}
EXPORT_SYMBOL(qcom_free_io_pgtable_ops);

#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS) && defined(CONFIG_ANDROID_VENDOR_OEM_DATA)
/*
 * iovad->android_vendor_data1, i.e. ANDROID_VENDOR_DATA(1), is a 64-bit field.
 *
 * Use bits 7:0 to encode the max_alignment_shift.
 * Use bit 16 for selecting the best_fit algorithm.
 * Reserve the remaining bits for future use.
 */
#define QCOM_IOVAD_VENDOR_BEST_FIT_MASK		BIT_MASK(16)
#define QCOM_IOVAD_VENDOR_MAX_ALIGN_SHIFT_MASK	GENMASK(7, 0)

static inline void iovad_set_best_fit_iova(struct iova_domain *iovad)
{
	iovad->android_vendor_data1 |= QCOM_IOVAD_VENDOR_BEST_FIT_MASK;
}

static inline bool iovad_use_best_fit_iova(struct iova_domain *iovad)
{
	return !!(iovad->android_vendor_data1 & QCOM_IOVAD_VENDOR_BEST_FIT_MASK);
}

static inline void iovad_set_max_align_shift(struct iova_domain *iovad,
					     unsigned long max_shift)
{
	if (max_shift > QCOM_IOVAD_VENDOR_MAX_ALIGN_SHIFT_MASK) {
		/* Use the default value of 9, or 2M alignment for 4K pages */
		WARN_ONCE(1, "Invalid value of max_align_shift!\n");
		max_shift = 9;
	}

	/*
	 * When extracting/computing max_align_shift, we assume that it
	 * is encoded in the LSB of ->android_vendor_data1. Ensure this
	 * with BUILD_BUG_ON.
	 */
	BUILD_BUG_ON(QCOM_IOVAD_VENDOR_MAX_ALIGN_SHIFT_MASK > 255);
	iovad->android_vendor_data1 |= max_shift;
}

static inline unsigned long iovad_get_max_align_shift(struct iova_domain *iovad)
{
	u64 max_shift = iovad->android_vendor_data1;

	/*
	 * When extracting/computing max_align_shift, we assume that it
	 * is encoded in the LSB of ->android_vendor_data1. Ensure this
	 * with BUILD_BUG_ON.
	 */
	BUILD_BUG_ON(QCOM_IOVAD_VENDOR_MAX_ALIGN_SHIFT_MASK > 255);

	max_shift &= QCOM_IOVAD_VENDOR_MAX_ALIGN_SHIFT_MASK;
	return (unsigned long)max_shift;
}

static void init_iovad_attr(void *unused, struct device *dev,
			    struct iova_domain *iovad)
{
	struct device_node *node;
	u32 shift;

	node = dev->of_node;
	if (of_property_read_bool(node, "qcom,iova-best-fit"))
		iovad_set_best_fit_iova(iovad);

	if (!of_property_read_u32(node, "qcom,iova-max-align-shift", &shift))
		iovad_set_max_align_shift(iovad, (unsigned long)shift);
}
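
/*
 * Illustrative sketch (not part of the original file): the two properties
 * read above live on the client device's DT node, for example:
 *
 *	example-client@0 {
 *		...
 *		qcom,iova-best-fit;
 *		qcom,iova-max-align-shift = <4>;
 *	};
 *
 * The node name and shift value here are hypothetical; only the property
 * names come from this file.
 */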

static void register_iommu_iovad_init_alloc_algo_vh(void)
{
	if (register_trace_android_rvh_iommu_iovad_init_alloc_algo(
				init_iovad_attr, NULL))
		pr_err("Failed to register init_iovad_attr vendor hook\n");
}

static struct iova *to_iova(struct rb_node *node)
{
	return rb_entry(node, struct iova, node);
}

/* Insert the iova into domain rbtree by holding writer lock */
static void iova_insert_rbtree(struct rb_root *root, struct iova *iova,
			       struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = to_iova(*new);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static unsigned long limit_align_shift(struct iova_domain *iovad,
				       unsigned long shift)
{
	unsigned long max_align_shift;
	unsigned long new_shift;

	new_shift = iovad_get_max_align_shift(iovad);
	/* If the device doesn't override it, reuse the current value */
	if (!new_shift)
		return shift;

	max_align_shift = new_shift + PAGE_SHIFT - iova_shift(iovad);
	return min_t(unsigned long, max_align_shift, shift);
}

static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    struct iova *new,
					    bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova, *prev_iova;
	unsigned long flags;
	unsigned long align_mask = ~0UL;
	struct rb_node *candidate_rb_parent;
	unsigned long new_pfn, candidate_pfn = ~0UL;
	unsigned long gap, candidate_gap = ~0UL;

	if (!iovad_use_best_fit_iova(iovad))
		return -EINVAL;

	if (size_aligned)
		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	curr = &iovad->anchor.node;
	prev = rb_prev(curr);
	for (; prev; curr = prev, prev = rb_prev(curr)) {
		curr_iova = rb_entry(curr, struct iova, node);
		prev_iova = rb_entry(prev, struct iova, node);

		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
		new_pfn = (limit_pfn - size) & align_mask;
		gap = curr_iova->pfn_lo - prev_iova->pfn_hi - 1;
		if ((limit_pfn >= size) && (new_pfn > prev_iova->pfn_hi)
				&& (gap < candidate_gap)) {
			candidate_gap = gap;
			candidate_pfn = new_pfn;
			candidate_rb_parent = curr;
			if (gap == size)
				goto insert;
		}
	}

	curr_iova = rb_entry(curr, struct iova, node);
	limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
	new_pfn = (limit_pfn - size) & align_mask;
	gap = curr_iova->pfn_lo - iovad->start_pfn;
	if (limit_pfn >= size && new_pfn >= iovad->start_pfn &&
			gap < candidate_gap) {
		candidate_gap = gap;
		candidate_pfn = new_pfn;
		candidate_rb_parent = curr;
	}

insert:
	if (candidate_pfn == ~0UL) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return -ENOMEM;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = candidate_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static void __qcom_alloc_insert_iova(void *data, struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn, struct iova *new,
				     bool size_aligned, int *ret)
{
	*ret = __alloc_and_insert_iova_best_fit(iovad, size, limit_pfn, new,
						size_aligned);
}

static void register_iommu_alloc_insert_iova_vh(void)
{
	if (register_trace_android_rvh_iommu_alloc_insert_iova(
				__qcom_alloc_insert_iova, NULL)) {
		pr_err("Failed to register alloc_insert_iova vendor hook\n");
	}
}

static void __qcom_limit_align_shift(void *data, struct iova_domain *iovad,
				     unsigned long size, unsigned long *shift)
{
	*shift = limit_align_shift(iovad, *shift);
}

static void register_iommu_limit_align_shift(void)
{
	if (register_trace_android_rvh_iommu_limit_align_shift(
				__qcom_limit_align_shift, NULL)) {
		pr_err("Failed to register limit_align_shift vendor hook\n");
	}
}

#else
static void register_iommu_iovad_init_alloc_algo_vh(void)
{
}

static void register_iommu_alloc_insert_iova_vh(void)
{
}

static void register_iommu_limit_align_shift(void)
{
}
#endif

/*
 * These tables must have the same length.
 * It is allowed to have a NULL exitcall corresponding to a non-NULL initcall.
 */
static initcall_t init_table[] __initdata = {
	dma_mapping_fast_init,
	qcom_dma_iommu_generic_driver_init,
	qcom_arm_lpae_do_selftests,
	qcom_io_pgtable_alloc_init,
	NULL
};

static exitcall_t exit_table[] = {
	NULL, /* dma_mapping_fast_exit */
	qcom_dma_iommu_generic_driver_exit,
	NULL, /* qcom_arm_lpae_do_selftests */
	qcom_io_pgtable_alloc_exit,
	NULL,
};

static int __init qcom_iommu_util_init(void)
{
	initcall_t *init_fn;
	exitcall_t *exit_fn;
	int ret;

	if (ARRAY_SIZE(init_table) != ARRAY_SIZE(exit_table)) {
		pr_err("qcom-iommu-util: Invalid initcall/exitcall table\n");
		return -EINVAL;
	}

	for (init_fn = init_table; *init_fn; init_fn++) {
		ret = (**init_fn)();
		if (ret) {
			pr_err("%ps returned %d\n", *init_fn, ret);
			goto out_undo;
		}
	}

	register_iommu_iovad_init_alloc_algo_vh();
	register_iommu_alloc_insert_iova_vh();
	register_iommu_limit_align_shift();

	return 0;

out_undo:
	exit_fn = exit_table + (init_fn - init_table);
	for (exit_fn--; exit_fn >= exit_table; exit_fn--) {
		if (!*exit_fn)
			continue;

		(**exit_fn)();
	}
	return ret;
}

#if IS_MODULE(CONFIG_QCOM_IOMMU_UTIL)
module_init(qcom_iommu_util_init);
#else
arch_initcall_sync(qcom_iommu_util_init);
#endif

MODULE_LICENSE("GPL v2");