iommu.h

  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
  4. * Author: Joerg Roedel <[email protected]>
  5. */
  6. #ifndef __LINUX_IOMMU_H
  7. #define __LINUX_IOMMU_H
  8. #include <linux/scatterlist.h>
  9. #include <linux/device.h>
  10. #include <linux/types.h>
  11. #include <linux/errno.h>
  12. #include <linux/err.h>
  13. #include <linux/of.h>
  14. #include <linux/ioasid.h>
  15. #include <uapi/linux/iommu.h>
  16. #define IOMMU_READ (1 << 0)
  17. #define IOMMU_WRITE (1 << 1)
  18. #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
  19. #define IOMMU_NOEXEC (1 << 3)
  20. #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
  21. /*
  22. * Where the bus hardware includes a privilege level as part of its access type
  23. * markings, and certain devices are capable of issuing transactions marked as
  24. * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
  25. * given permission flags only apply to accesses at the higher privilege level,
  26. * and that unprivileged transactions should have as little access as possible.
  27. * This would usually imply the same permissions as kernel mappings on the CPU,
  28. * if the IOMMU page table format is equivalent.
  29. */
  30. #define IOMMU_PRIV (1 << 5)
  31. /*
  32. * Allow caching in a transparent outer level of cache, also known as
  33. * the last-level or system cache, with a read/write allocation policy.
  34. * Does not depend on IOMMU_CACHE. Incompatible with IOMMU_SYS_CACHE_NWA.
  35. */
  36. #define IOMMU_SYS_CACHE (1 << 6)
  37. /*
  38. * Allow caching in a transparent outer level of cache, also known as
  39. * the last-level or system cache, with a read allocation policy.
  40. * Does not depend on IOMMU_CACHE. Incompatible with IOMMU_SYS_CACHE.
  41. */
  42. #define IOMMU_SYS_CACHE_NWA (1 << 7)
  43. struct iommu_ops;
  44. struct iommu_group;
  45. struct bus_type;
  46. struct device;
  47. struct iommu_domain;
  48. struct iommu_domain_ops;
  49. struct notifier_block;
  50. struct iommu_sva;
  51. struct iommu_fault_event;
  52. struct iommu_dma_cookie;
  53. /* iommu fault flags */
  54. #define IOMMU_FAULT_READ 0x0
  55. #define IOMMU_FAULT_WRITE 0x1
  56. typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
  57. struct device *, unsigned long, int, void *);
  58. typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
  59. struct iommu_domain_geometry {
  60. dma_addr_t aperture_start; /* First address that can be mapped */
  61. dma_addr_t aperture_end; /* Last address that can be mapped */
  62. bool force_aperture; /* DMA only allowed in mappable range? */
  63. };
  64. /* Domain feature flags */
  65. #define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
  66. #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
  67. implementation */
  68. #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
  69. #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
  70. #define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
  71. /*
  72. * These are the possible domain types
  73. *
  74. * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
  75. * devices
  76. * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
  77. * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
  78. * for VMs
  79. * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
  80. * This flag allows IOMMU drivers to implement
  81. * certain optimizations for these domains
  82. * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
  83. * invalidation.
  84. * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses
  85. * represented by mm_struct's.
  86. */
  87. #define IOMMU_DOMAIN_BLOCKED (0U)
  88. #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
  89. #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
  90. #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
  91. __IOMMU_DOMAIN_DMA_API)
  92. #define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
  93. __IOMMU_DOMAIN_DMA_API | \
  94. __IOMMU_DOMAIN_DMA_FQ)
  95. #define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
  96. struct iommu_domain {
  97. unsigned type;
  98. const struct iommu_domain_ops *ops;
  99. unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
  100. struct iommu_domain_geometry geometry;
  101. struct iommu_dma_cookie *iova_cookie;
  102. enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
  103. void *data);
  104. void *fault_data;
  105. union {
  106. struct {
  107. iommu_fault_handler_t handler;
  108. void *handler_token;
  109. };
  110. struct { /* IOMMU_DOMAIN_SVA */
  111. struct mm_struct *mm;
  112. int users;
  113. };
  114. };
  115. };
  116. static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
  117. {
  118. return domain->type & __IOMMU_DOMAIN_DMA_API;
  119. }
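/*
 * Example (illustrative sketch, not part of this header): a typical
 * IOMMU-API user drives an IOMMU_DOMAIN_UNMANAGED domain roughly as
 * below, using the iommu_domain_alloc()/iommu_attach_device()/iommu_map()
 * declarations further down. @dev, @iova, @paddr and the out_free label
 * are hypothetical.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *	int ret;
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret)
 *		goto out_free;
 *	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 * out_free:
 *	iommu_domain_free(domain);
 */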
  120. enum iommu_cap {
  121. IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
  122. IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
  123. IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
  124. IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
  125. DMA protection and we should too */
  126. };
  127. /* These are the possible reserved region types */
  128. enum iommu_resv_type {
  129. /* Memory regions which must be mapped 1:1 at all times */
  130. IOMMU_RESV_DIRECT,
  131. /*
  132. * Memory regions which are advertised to be 1:1 but are
  133. * commonly considered relaxable in some conditions,
  134. * for instance in device assignment use cases (USB, Graphics)
  135. */
  136. IOMMU_RESV_DIRECT_RELAXABLE,
  137. /* Arbitrary "never map this or give it to a device" address ranges */
  138. IOMMU_RESV_RESERVED,
  139. /* Hardware MSI region (untranslated) */
  140. IOMMU_RESV_MSI,
  141. /* Software-managed MSI translation window */
  142. IOMMU_RESV_SW_MSI,
  143. };
  144. /**
  145. * struct iommu_resv_region - descriptor for a reserved memory region
  146. * @list: Linked list pointers
  147. * @start: System physical start address of the region
  148. * @length: Length of the region in bytes
  149. * @prot: IOMMU Protection flags (READ/WRITE/...)
  150. * @type: Type of the reserved region
  151. * @free: Callback to free associated memory allocations
  152. */
  153. struct iommu_resv_region {
  154. struct list_head list;
  155. phys_addr_t start;
  156. size_t length;
  157. int prot;
  158. enum iommu_resv_type type;
  159. void (*free)(struct device *dev, struct iommu_resv_region *region);
  160. };
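/*
 * Example (illustrative sketch): a driver's ->get_resv_regions() callback
 * usually allocates regions with iommu_alloc_resv_region() (declared
 * further down) and links them onto the caller's list. The MSI window
 * base and size below are hypothetical, driver-specific values.
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_IOVA_BASE, SZ_1M,
 *						 IOMMU_WRITE | IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI, GFP_KERNEL);
 *		if (!region)
 *			return;
 *		list_add_tail(&region->list, head);
 *	}
 */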
  161. struct iommu_iort_rmr_data {
  162. struct iommu_resv_region rr;
  163. /* Stream IDs associated with IORT RMR entry */
  164. const u32 *sids;
  165. u32 num_sids;
  166. };
  167. /**
  168. * enum iommu_dev_features - Per device IOMMU features
  169. * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
  170. * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
  171. * enabling %IOMMU_DEV_FEAT_SVA requires
  172. * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
  173. * Faults themselves instead of relying on the IOMMU. When
  174. * supported, this feature must be enabled before and
  175. * disabled after %IOMMU_DEV_FEAT_SVA.
  176. *
  177. * Device drivers enable a feature using iommu_dev_enable_feature().
  178. */
  179. enum iommu_dev_features {
  180. IOMMU_DEV_FEAT_SVA,
  181. IOMMU_DEV_FEAT_IOPF,
  182. };
  183. #define IOMMU_PASID_INVALID (-1U)
  184. #ifdef CONFIG_IOMMU_API
  185. /**
  186. * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
  187. *
  188. * @start: IOVA representing the start of the range to be flushed
  189. * @end: IOVA representing the end of the range to be flushed (inclusive)
  190. * @pgsize: The interval at which to perform the flush
  191. * @freelist: Removed pages to free after sync
  192. * @queued: Indicates that the flush will be queued
  193. *
  194. * This structure is intended to be updated by multiple calls to the
  195. * ->unmap() function in struct iommu_ops before eventually being passed
  196. * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
  197. * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
  198. * them. @queued is set to indicate when ->iotlb_flush_all() will be called
  199. * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
  200. */
  201. struct iommu_iotlb_gather {
  202. unsigned long start;
  203. unsigned long end;
  204. size_t pgsize;
  205. struct list_head freelist;
  206. bool queued;
  207. };
  208. /**
  209. * struct iommu_ops - iommu ops and capabilities
  210. * @capable: check capability
  211. * @domain_alloc: allocate iommu domain
  212. * @probe_device: Add device to iommu driver handling
  213. * @release_device: Remove device from iommu driver handling
  214. * @probe_finalize: Do final setup work after the device is added to an IOMMU
  215. * group and attached to the group's domain
  216. * @device_group: find iommu group for a particular device
  217. * @get_resv_regions: Request list of reserved regions for a device
  218. * @of_xlate: add OF master IDs to iommu grouping
  219. * @is_attach_deferred: Check if domain attach should be deferred from iommu
  220. * driver init to device driver init (default no)
  221. * @dev_enable/disable_feat: per device entries to enable/disable
  222. * iommu specific features.
  223. * @page_response: handle page request response
  224. * @def_domain_type: device default domain type, return value:
  225. * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
  226. * - IOMMU_DOMAIN_DMA: must use a dma domain
  227. * - 0: use the default setting
  228. * @default_domain_ops: the default ops for domains
  229. * @remove_dev_pasid: Remove any translation configurations of a specific
  230. * pasid, so that any DMA transactions with this pasid
  231. * will be blocked by the hardware.
  232. * @pgsize_bitmap: bitmap of all possible supported page sizes
  233. * @owner: Driver module providing these ops
  234. */
  235. struct iommu_ops {
  236. bool (*capable)(struct device *dev, enum iommu_cap);
  237. /* Domain allocation and freeing by the iommu driver */
  238. struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
  239. struct iommu_device *(*probe_device)(struct device *dev);
  240. void (*release_device)(struct device *dev);
  241. void (*probe_finalize)(struct device *dev);
  242. struct iommu_group *(*device_group)(struct device *dev);
  243. /* Request/Free a list of reserved regions for a device */
  244. void (*get_resv_regions)(struct device *dev, struct list_head *list);
  245. int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
  246. bool (*is_attach_deferred)(struct device *dev);
  247. /* Per device IOMMU features */
  248. int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
  249. int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
  250. int (*page_response)(struct device *dev,
  251. struct iommu_fault_event *evt,
  252. struct iommu_page_response *msg);
  253. int (*def_domain_type)(struct device *dev);
  254. void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);
  255. const struct iommu_domain_ops *default_domain_ops;
  256. unsigned long pgsize_bitmap;
  257. struct module *owner;
  258. };
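/*
 * Example (illustrative sketch): drivers typically expose one const
 * iommu_ops instance wired up along these lines; every my_*() callback
 * name below is hypothetical, while generic_device_group() is declared
 * further down in this header.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable	= my_capable,
 *		.domain_alloc	= my_domain_alloc,
 *		.probe_device	= my_probe_device,
 *		.release_device	= my_release_device,
 *		.device_group	= generic_device_group,
 *		.of_xlate	= my_of_xlate,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.owner		= THIS_MODULE,
 *		.default_domain_ops = &(const struct iommu_domain_ops) {
 *			.attach_dev	= my_attach_dev,
 *			.map_pages	= my_map_pages,
 *			.unmap_pages	= my_unmap_pages,
 *			.iova_to_phys	= my_iova_to_phys,
 *			.free		= my_domain_free,
 *		},
 *	};
 */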
  259. /**
  260. * struct iommu_domain_ops - domain specific operations
  261. * @attach_dev: attach an iommu domain to a device
  262. * @detach_dev: detach an iommu domain from a device
  263. * @set_dev_pasid: set an iommu domain to a pasid of device
  264. * @map: map a physically contiguous memory region to an iommu domain
  265. * @map_pages: map a physically contiguous set of pages of the same size to
  266. * an iommu domain.
  267. * @unmap: unmap a physically contiguous memory region from an iommu domain
  268. * @unmap_pages: unmap a number of pages of the same size from an iommu domain
  269. * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
  270. * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
  271. * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  272. * queue
  273. * @iova_to_phys: translate iova to physical address
  274. * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
  275. * including no-snoop TLPs on PCIe or other platform
  276. * specific mechanisms.
  277. * @enable_nesting: Enable nesting
  278. * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
  279. * @free: Release the domain after use.
  280. */
  281. struct iommu_domain_ops {
  282. int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
  283. void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
  284. int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
  285. ioasid_t pasid);
  286. int (*map)(struct iommu_domain *domain, unsigned long iova,
  287. phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
  288. int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
  289. phys_addr_t paddr, size_t pgsize, size_t pgcount,
  290. int prot, gfp_t gfp, size_t *mapped);
  291. size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
  292. size_t size, struct iommu_iotlb_gather *iotlb_gather);
  293. size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
  294. size_t pgsize, size_t pgcount,
  295. struct iommu_iotlb_gather *iotlb_gather);
  296. void (*flush_iotlb_all)(struct iommu_domain *domain);
  297. void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
  298. size_t size);
  299. void (*iotlb_sync)(struct iommu_domain *domain,
  300. struct iommu_iotlb_gather *iotlb_gather);
  301. phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
  302. dma_addr_t iova);
  303. bool (*enforce_cache_coherency)(struct iommu_domain *domain);
  304. int (*enable_nesting)(struct iommu_domain *domain);
  305. int (*set_pgtable_quirks)(struct iommu_domain *domain,
  306. unsigned long quirks);
  307. void (*free)(struct iommu_domain *domain);
  308. };
  309. /**
  310. * struct iommu_device - IOMMU core representation of one IOMMU hardware
  311. * instance
  312. * @list: Used by the iommu-core to keep a list of registered iommus
  313. * @ops: iommu-ops for talking to this iommu
  314. * @dev: struct device for sysfs handling
  315. * @max_pasids: number of supported PASIDs
  316. */
  317. struct iommu_device {
  318. struct list_head list;
  319. const struct iommu_ops *ops;
  320. struct fwnode_handle *fwnode;
  321. struct device *dev;
  322. u32 max_pasids;
  323. };
  324. /**
  325. * struct iommu_fault_event - Generic fault event
  326. *
  327. * Can represent recoverable faults such as page requests or
  328. * unrecoverable faults such as DMA or IRQ remapping faults.
  329. *
  330. * @fault: fault descriptor
  331. * @list: pending fault event list, used for tracking responses
  332. */
  333. struct iommu_fault_event {
  334. struct iommu_fault fault;
  335. struct list_head list;
  336. };
  337. /**
  338. * struct iommu_fault_param - per-device IOMMU fault data
  339. * @handler: Callback function to handle IOMMU faults at device level
  340. * @data: handler private data
  341. * @faults: holds the pending faults which need a response
  342. * @lock: protect pending faults list
  343. */
  344. struct iommu_fault_param {
  345. iommu_dev_fault_handler_t handler;
  346. void *data;
  347. struct list_head faults;
  348. struct mutex lock;
  349. };
  350. /**
  351. * struct dev_iommu - Collection of per-device IOMMU data
  352. *
  353. * @fault_param: IOMMU detected device fault reporting data
  354. * @iopf_param: I/O Page Fault queue and data
  355. * @fwspec: IOMMU fwspec data
  356. * @iommu_dev: IOMMU device this device is linked to
  357. * @priv: IOMMU Driver private data
  358. * @max_pasids: number of PASIDs this device can consume
  359. *
  360. * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
  361. * struct iommu_group *iommu_group;
  362. */
  363. struct dev_iommu {
  364. struct mutex lock;
  365. struct iommu_fault_param *fault_param;
  366. struct iopf_device_param *iopf_param;
  367. struct iommu_fwspec *fwspec;
  368. struct iommu_device *iommu_dev;
  369. void *priv;
  370. u32 max_pasids;
  371. };
  372. int iommu_device_register(struct iommu_device *iommu,
  373. const struct iommu_ops *ops,
  374. struct device *hwdev);
  375. void iommu_device_unregister(struct iommu_device *iommu);
  376. int iommu_device_sysfs_add(struct iommu_device *iommu,
  377. struct device *parent,
  378. const struct attribute_group **groups,
  379. const char *fmt, ...) __printf(4, 5);
  380. void iommu_device_sysfs_remove(struct iommu_device *iommu);
  381. int iommu_device_link(struct iommu_device *iommu, struct device *link);
  382. void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
  383. int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
  384. static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
  385. {
  386. return (struct iommu_device *)dev_get_drvdata(dev);
  387. }
  388. static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
  389. {
  390. *gather = (struct iommu_iotlb_gather) {
  391. .start = ULONG_MAX,
  392. .freelist = LIST_HEAD_INIT(gather->freelist),
  393. };
  394. }
  395. static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
  396. {
  397. /*
  398. * Assume that valid ops must be installed if iommu_probe_device()
  399. * has succeeded. The device ops are essentially for internal use
  400. * within the IOMMU subsystem itself, so we should be able to trust
  401. * ourselves not to misuse the helper.
  402. */
  403. return dev->iommu->iommu_dev->ops;
  404. }
  405. extern int bus_iommu_probe(struct bus_type *bus);
  406. extern bool iommu_present(struct bus_type *bus);
  407. extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
  408. extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
  409. extern struct iommu_group *iommu_group_get_by_id(int id);
  410. extern void iommu_domain_free(struct iommu_domain *domain);
  411. extern int iommu_attach_device(struct iommu_domain *domain,
  412. struct device *dev);
  413. extern void iommu_detach_device(struct iommu_domain *domain,
  414. struct device *dev);
  415. extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
  416. struct device *dev, ioasid_t pasid);
  417. extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
  418. extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
  419. extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
  420. phys_addr_t paddr, size_t size, int prot);
  421. extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
  422. phys_addr_t paddr, size_t size, int prot);
  423. extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
  424. size_t size);
  425. extern size_t iommu_unmap_fast(struct iommu_domain *domain,
  426. unsigned long iova, size_t size,
  427. struct iommu_iotlb_gather *iotlb_gather);
  428. extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
  429. struct scatterlist *sg, unsigned int nents, int prot);
  430. extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
  431. unsigned long iova, struct scatterlist *sg,
  432. unsigned int nents, int prot);
  433. extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
  434. extern void iommu_set_fault_handler(struct iommu_domain *domain,
  435. iommu_fault_handler_t handler, void *token);
  436. extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
  437. extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
  438. extern void iommu_set_default_passthrough(bool cmd_line);
  439. extern void iommu_set_default_translated(bool cmd_line);
  440. extern bool iommu_default_passthrough(void);
  441. extern struct iommu_resv_region *
  442. iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
  443. enum iommu_resv_type type, gfp_t gfp);
  444. extern int iommu_get_group_resv_regions(struct iommu_group *group,
  445. struct list_head *head);
  446. extern int iommu_attach_group(struct iommu_domain *domain,
  447. struct iommu_group *group);
  448. extern void iommu_detach_group(struct iommu_domain *domain,
  449. struct iommu_group *group);
  450. extern struct iommu_group *iommu_group_alloc(void);
  451. extern void *iommu_group_get_iommudata(struct iommu_group *group);
  452. extern void iommu_group_set_iommudata(struct iommu_group *group,
  453. void *iommu_data,
  454. void (*release)(void *iommu_data));
  455. extern int iommu_group_set_name(struct iommu_group *group, const char *name);
  456. extern int iommu_group_add_device(struct iommu_group *group,
  457. struct device *dev);
  458. extern void iommu_group_remove_device(struct device *dev);
  459. extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
  460. int (*fn)(struct device *, void *));
  461. extern struct iommu_group *iommu_group_get(struct device *dev);
  462. extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
  463. extern void iommu_group_put(struct iommu_group *group);
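/*
 * Example (illustrative sketch): group references are counted, so a
 * lookup is normally paired with iommu_group_put():
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		dev_info(dev, "device is in IOMMU group %d\n",
 *			 iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */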
  464. extern int iommu_register_device_fault_handler(struct device *dev,
  465. iommu_dev_fault_handler_t handler,
  466. void *data);
  467. extern int iommu_unregister_device_fault_handler(struct device *dev);
  468. extern int iommu_report_device_fault(struct device *dev,
  469. struct iommu_fault_event *evt);
  470. extern int iommu_page_response(struct device *dev,
  471. struct iommu_page_response *msg);
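/*
 * Example (illustrative sketch): device drivers that want to see
 * recoverable faults register a handler, then answer page requests with
 * iommu_page_response(). The handler and data names are hypothetical.
 *
 *	static int my_dev_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		// queue @fault, reply later via iommu_page_response()
 *		return 0;
 *	}
 *	...
 *	ret = iommu_register_device_fault_handler(dev, my_dev_fault_handler,
 *						   drvdata);
 *	...
 *	iommu_unregister_device_fault_handler(dev);
 */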
  472. extern int iommu_group_id(struct iommu_group *group);
  473. extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
  474. int iommu_enable_nesting(struct iommu_domain *domain);
  475. int iommu_set_pgtable_quirks(struct iommu_domain *domain,
  476. unsigned long quirks);
  477. void iommu_set_dma_strict(void);
  478. extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
  479. unsigned long iova, int flags);
  480. static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
  481. {
  482. if (domain->ops->flush_iotlb_all)
  483. domain->ops->flush_iotlb_all(domain);
  484. }
  485. static inline void iommu_iotlb_sync(struct iommu_domain *domain,
  486. struct iommu_iotlb_gather *iotlb_gather)
  487. {
  488. if (domain->ops->iotlb_sync)
  489. domain->ops->iotlb_sync(domain, iotlb_gather);
  490. iommu_iotlb_gather_init(iotlb_gather);
  491. }
  492. /**
  493. * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
  494. *
  495. * @gather: TLB gather data
  496. * @iova: start of page to invalidate
  497. * @size: size of page to invalidate
  498. *
  499. * Helper for IOMMU drivers to check whether a new range and the gathered range
  500. * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
  501. * than merging the two, which might lead to unnecessary invalidations.
  502. */
  503. static inline
  504. bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
  505. unsigned long iova, size_t size)
  506. {
  507. unsigned long start = iova, end = start + size - 1;
  508. return gather->end != 0 &&
  509. (end + 1 < gather->start || start > gather->end + 1);
  510. }
  511. /**
  512. * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
  513. * @gather: TLB gather data
  514. * @iova: start of page to invalidate
  515. * @size: size of page to invalidate
  516. *
  517. * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
  518. * where only the address range matters, and simply minimising intermediate
  519. * syncs is preferred.
  520. */
  521. static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
  522. unsigned long iova, size_t size)
  523. {
  524. unsigned long end = iova + size - 1;
  525. if (gather->start > iova)
  526. gather->start = iova;
  527. if (gather->end < end)
  528. gather->end = end;
  529. }
  530. /**
  531. * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
  532. * @domain: IOMMU domain to be invalidated
  533. * @gather: TLB gather data
  534. * @iova: start of page to invalidate
  535. * @size: size of page to invalidate
  536. *
  537. * Helper for IOMMU drivers to build invalidation commands based on individual
  538. * pages, or with page size/table level hints which cannot be gathered if they
  539. * differ.
  540. */
  541. static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
  542. struct iommu_iotlb_gather *gather,
  543. unsigned long iova, size_t size)
  544. {
  545. /*
  546. * If the new page is disjoint from the current range or is mapped at
  547. * a different granularity, then sync the TLB so that the gather
  548. * structure can be rewritten.
  549. */
  550. if ((gather->pgsize && gather->pgsize != size) ||
  551. iommu_iotlb_gather_is_disjoint(gather, iova, size))
  552. iommu_iotlb_sync(domain, gather);
  553. gather->pgsize = size;
  554. iommu_iotlb_gather_add_range(gather, iova, size);
  555. }
  556. static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
  557. {
  558. return gather && gather->queued;
  559. }
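/*
 * Example (illustrative sketch): callers batching unmaps use the gather
 * structure so that a single sync covers the whole unmapped range:
 *
 *	struct iommu_iotlb_gather gather;
 *	size_t unmapped;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */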
  560. /* PCI device grouping function */
  561. extern struct iommu_group *pci_device_group(struct device *dev);
  562. /* Generic device grouping function */
  563. extern struct iommu_group *generic_device_group(struct device *dev);
  564. /* FSL-MC device grouping function */
  565. struct iommu_group *fsl_mc_device_group(struct device *dev);
  566. /**
  567. * struct iommu_fwspec - per-device IOMMU instance data
  568. * @ops: ops for this device's IOMMU
  569. * @iommu_fwnode: firmware handle for this device's IOMMU
  570. * @flags: IOMMU_FWSPEC_* flags
  571. * @num_ids: number of associated device IDs
  572. * @ids: IDs which this device may present to the IOMMU
  573. */
  574. struct iommu_fwspec {
  575. const struct iommu_ops *ops;
  576. struct fwnode_handle *iommu_fwnode;
  577. u32 flags;
  578. unsigned int num_ids;
  579. u32 ids[];
  580. };
  581. /* ATS is supported */
  582. #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
  583. /**
  584. * struct iommu_sva - handle to a device-mm bond
  585. */
  586. struct iommu_sva {
  587. struct device *dev;
  588. struct iommu_domain *domain;
  589. };
  590. int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
  591. const struct iommu_ops *ops);
  592. void iommu_fwspec_free(struct device *dev);
  593. int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
  594. const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
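/*
 * Example (illustrative sketch): a DT-based driver's ->of_xlate()
 * callback typically just records the master ID from the phandle
 * argument; my_of_xlate() is a hypothetical name.
 *
 *	static int my_of_xlate(struct device *dev, struct of_phandle_args *args)
 *	{
 *		return iommu_fwspec_add_ids(dev, args->args, 1);
 *	}
 */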
  595. static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
  596. {
  597. if (dev->iommu)
  598. return dev->iommu->fwspec;
  599. else
  600. return NULL;
  601. }
  608. static inline void dev_iommu_fwspec_set(struct device *dev,
  609. struct iommu_fwspec *fwspec)
  610. {
  611. dev->iommu->fwspec = fwspec;
  612. }
  613. static inline void *dev_iommu_priv_get(struct device *dev)
  614. {
  615. if (dev->iommu)
  616. return dev->iommu->priv;
  617. else
  618. return NULL;
  619. }
  620. static inline void dev_iommu_priv_set(struct device *dev, void *priv)
  621. {
  622. dev->iommu->priv = priv;
  623. }
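/*
 * Example (illustrative sketch): drivers stash their per-device state in
 * ->probe_device() and retrieve it from other callbacks; struct my_master
 * is hypothetical.
 *
 *	dev_iommu_priv_set(dev, master);		// in ->probe_device()
 *	...
 *	struct my_master *master = dev_iommu_priv_get(dev);  // e.g. in ->attach_dev()
 */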
  624. extern struct mutex iommu_probe_device_lock;
  625. int iommu_probe_device(struct device *dev);
  626. void iommu_release_device(struct device *dev);
  627. int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
  628. int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
  629. int iommu_device_use_default_domain(struct device *dev);
  630. void iommu_device_unuse_default_domain(struct device *dev);
  631. int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
  632. void iommu_group_release_dma_owner(struct iommu_group *group);
  633. bool iommu_group_dma_owner_claimed(struct iommu_group *group);
  634. struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
  635. struct mm_struct *mm);
  636. int iommu_attach_device_pasid(struct iommu_domain *domain,
  637. struct device *dev, ioasid_t pasid);
  638. void iommu_detach_device_pasid(struct iommu_domain *domain,
  639. struct device *dev, ioasid_t pasid);
  640. struct iommu_domain *
  641. iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
  642. unsigned int type);
  643. #else /* CONFIG_IOMMU_API */
  644. struct iommu_ops {};
  645. struct iommu_group {};
  646. struct iommu_fwspec {};
  647. struct iommu_device {};
  648. struct iommu_fault_param {};
  649. struct iommu_iotlb_gather {};
  650. static inline bool iommu_present(struct bus_type *bus)
  651. {
  652. return false;
  653. }
  654. static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
  655. {
  656. return false;
  657. }
  658. static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
  659. {
  660. return NULL;
  661. }
  662. static inline struct iommu_group *iommu_group_get_by_id(int id)
  663. {
  664. return NULL;
  665. }
  666. static inline void iommu_domain_free(struct iommu_domain *domain)
  667. {
  668. }
  669. static inline int iommu_attach_device(struct iommu_domain *domain,
  670. struct device *dev)
  671. {
  672. return -ENODEV;
  673. }
  674. static inline void iommu_detach_device(struct iommu_domain *domain,
  675. struct device *dev)
  676. {
  677. }
  678. static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
  679. {
  680. return NULL;
  681. }
  682. static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
  683. phys_addr_t paddr, size_t size, int prot)
  684. {
  685. return -ENODEV;
  686. }
  687. static inline int iommu_map_atomic(struct iommu_domain *domain,
  688. unsigned long iova, phys_addr_t paddr,
  689. size_t size, int prot)
  690. {
  691. return -ENODEV;
  692. }
  693. static inline size_t iommu_unmap(struct iommu_domain *domain,
  694. unsigned long iova, size_t size)
  695. {
  696. return 0;
  697. }
  698. static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
  699. unsigned long iova, size_t size,
  700. struct iommu_iotlb_gather *iotlb_gather)
  701. {
  702. return 0;
  703. }
  704. static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
  705. unsigned long iova, struct scatterlist *sg,
  706. unsigned int nents, int prot)
  707. {
  708. return -ENODEV;
  709. }
  710. static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
  711. unsigned long iova, struct scatterlist *sg,
  712. unsigned int nents, int prot)
  713. {
  714. return -ENODEV;
  715. }
  716. static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
  717. {
  718. }
  719. static inline void iommu_iotlb_sync(struct iommu_domain *domain,
  720. struct iommu_iotlb_gather *iotlb_gather)
  721. {
  722. }
  723. static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
  724. {
  725. return 0;
  726. }
  727. static inline void iommu_set_fault_handler(struct iommu_domain *domain,
  728. iommu_fault_handler_t handler, void *token)
  729. {
  730. }
  731. static inline void iommu_get_resv_regions(struct device *dev,
  732. struct list_head *list)
  733. {
  734. }
  735. static inline void iommu_put_resv_regions(struct device *dev,
  736. struct list_head *list)
  737. {
  738. }
  739. static inline int iommu_get_group_resv_regions(struct iommu_group *group,
  740. struct list_head *head)
  741. {
  742. return -ENODEV;
  743. }
  744. static inline void iommu_set_default_passthrough(bool cmd_line)
  745. {
  746. }
  747. static inline void iommu_set_default_translated(bool cmd_line)
  748. {
  749. }
  750. static inline bool iommu_default_passthrough(void)
  751. {
  752. return true;
  753. }
  754. static inline int iommu_attach_group(struct iommu_domain *domain,
  755. struct iommu_group *group)
  756. {
  757. return -ENODEV;
  758. }
  759. static inline void iommu_detach_group(struct iommu_domain *domain,
  760. struct iommu_group *group)
  761. {
  762. }
  763. static inline struct iommu_group *iommu_group_alloc(void)
  764. {
  765. return ERR_PTR(-ENODEV);
  766. }
  767. static inline void *iommu_group_get_iommudata(struct iommu_group *group)
  768. {
  769. return NULL;
  770. }
  771. static inline void iommu_group_set_iommudata(struct iommu_group *group,
  772. void *iommu_data,
  773. void (*release)(void *iommu_data))
  774. {
  775. }
  776. static inline int iommu_group_set_name(struct iommu_group *group,
  777. const char *name)
  778. {
  779. return -ENODEV;
  780. }
  781. static inline int iommu_group_add_device(struct iommu_group *group,
  782. struct device *dev)
  783. {
  784. return -ENODEV;
  785. }
  786. static inline void iommu_group_remove_device(struct device *dev)
  787. {
  788. }
  789. static inline int iommu_group_for_each_dev(struct iommu_group *group,
  790. void *data,
  791. int (*fn)(struct device *, void *))
  792. {
  793. return -ENODEV;
  794. }
  795. static inline struct iommu_group *iommu_group_get(struct device *dev)
  796. {
  797. return NULL;
  798. }
  799. static inline void iommu_group_put(struct iommu_group *group)
  800. {
  801. }
  802. static inline
  803. int iommu_register_device_fault_handler(struct device *dev,
  804. iommu_dev_fault_handler_t handler,
  805. void *data)
  806. {
  807. return -ENODEV;
  808. }
  809. static inline int iommu_unregister_device_fault_handler(struct device *dev)
  810. {
  811. return 0;
  812. }
  813. static inline
  814. int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
  815. {
  816. return -ENODEV;
  817. }
  818. static inline int iommu_page_response(struct device *dev,
  819. struct iommu_page_response *msg)
  820. {
  821. return -ENODEV;
  822. }
  823. static inline int iommu_group_id(struct iommu_group *group)
  824. {
  825. return -ENODEV;
  826. }
  827. static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
  828. unsigned long quirks)
  829. {
  830. return 0;
  831. }
  832. static inline int iommu_device_register(struct iommu_device *iommu,
  833. const struct iommu_ops *ops,
  834. struct device *hwdev)
  835. {
  836. return -ENODEV;
  837. }
  838. static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
  839. {
  840. return NULL;
  841. }
  842. static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
  843. {
  844. }
  845. static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
  846. struct iommu_iotlb_gather *gather,
  847. unsigned long iova, size_t size)
  848. {
  849. }
  850. static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
  851. {
  852. return false;
  853. }
  854. static inline void iommu_device_unregister(struct iommu_device *iommu)
  855. {
  856. }
  857. static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
  858. struct device *parent,
  859. const struct attribute_group **groups,
  860. const char *fmt, ...)
  861. {
  862. return -ENODEV;
  863. }
  864. static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
  865. {
  866. }
  867. static inline int iommu_device_link(struct device *dev, struct device *link)
  868. {
  869. return -EINVAL;
  870. }
  871. static inline void iommu_device_unlink(struct device *dev, struct device *link)
  872. {
  873. }
  874. static inline int iommu_fwspec_init(struct device *dev,
  875. struct fwnode_handle *iommu_fwnode,
  876. const struct iommu_ops *ops)
  877. {
  878. return -ENODEV;
  879. }
  880. static inline void iommu_fwspec_free(struct device *dev)
  881. {
  882. }
  883. static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
  884. int num_ids)
  885. {
  886. return -ENODEV;
  887. }
  888. static inline
  889. const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
  890. {
  891. return NULL;
  892. }
  893. static inline int
  894. iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
  895. {
  896. return -ENODEV;
  897. }
  898. static inline int
  899. iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
  900. {
  901. return -ENODEV;
  902. }
  903. static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
  904. {
  905. return NULL;
  906. }
  907. static inline int iommu_device_use_default_domain(struct device *dev)
  908. {
  909. return 0;
  910. }
  911. static inline void iommu_device_unuse_default_domain(struct device *dev)
  912. {
  913. }
  914. static inline int
  915. iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
  916. {
  917. return -ENODEV;
  918. }
  919. static inline void iommu_group_release_dma_owner(struct iommu_group *group)
  920. {
  921. }
  922. static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
  923. {
  924. return false;
  925. }
  926. static inline struct iommu_domain *
  927. iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
  928. {
  929. return NULL;
  930. }
  931. static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
  932. struct device *dev, ioasid_t pasid)
  933. {
  934. return -ENODEV;
  935. }
  936. static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
  937. struct device *dev, ioasid_t pasid)
  938. {
  939. }
  940. static inline struct iommu_domain *
  941. iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
  942. unsigned int type)
  943. {
  944. return NULL;
  945. }
  946. #endif /* CONFIG_IOMMU_API */
  947. /**
  948. * iommu_map_sgtable - Map the given buffer to the IOMMU domain
  949. * @domain: The IOMMU domain to perform the mapping
  950. * @iova: The start address to map the buffer
  951. * @sgt: The sg_table object describing the buffer
  952. * @prot: IOMMU protection bits
  953. *
  954. * Creates a mapping at @iova for the buffer described by a scatterlist
  955. * stored in the given sg_table object in the provided IOMMU domain.
  956. */
  957. static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
  958. unsigned long iova, struct sg_table *sgt, int prot)
  959. {
  960. return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
  961. }
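/*
 * Example (illustrative sketch): callers usually compare the return value
 * against the expected buffer size, since a short mapping indicates
 * failure; @buf_size is hypothetical.
 *
 *	if (iommu_map_sgtable(domain, iova, sgt, IOMMU_READ | IOMMU_WRITE)
 *	    < buf_size)
 *		return -ENOMEM;
 */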
  962. #ifdef CONFIG_IOMMU_DEBUGFS
  963. extern struct dentry *iommu_debugfs_dir;
  964. void iommu_debugfs_setup(void);
  965. #else
  966. static inline void iommu_debugfs_setup(void) {}
  967. #endif
  968. #ifdef CONFIG_IOMMU_DMA
  969. #include <linux/msi.h>
  970. /* Setup call for arch DMA mapping code */
  971. void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
  972. int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
  973. int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
  974. void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
  975. #else /* CONFIG_IOMMU_DMA */
  976. struct msi_desc;
  977. struct msi_msg;
  978. static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
  979. {
  980. }
  981. static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
  982. {
  983. return -ENODEV;
  984. }
  985. static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
  986. {
  987. return 0;
  988. }
  989. static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
  990. {
  991. }
  992. #endif /* CONFIG_IOMMU_DMA */
  993. #ifdef CONFIG_IOMMU_SVA
  994. struct iommu_sva *iommu_sva_bind_device(struct device *dev,
  995. struct mm_struct *mm);
  996. void iommu_sva_unbind_device(struct iommu_sva *handle);
  997. u32 iommu_sva_get_pasid(struct iommu_sva *handle);
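/*
 * Example (illustrative sketch): SVA-capable device drivers bind the
 * current process and program the returned PASID into the device:
 *
 *	struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
 *	u32 pasid;
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	if (pasid == IOMMU_PASID_INVALID) {
 *		iommu_sva_unbind_device(handle);
 *		return -ENODEV;
 *	}
 *	// the device can now issue DMA tagged with @pasid into the process VA
 */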
  998. #else
  999. static inline struct iommu_sva *
  1000. iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
  1001. {
  1002. return NULL;
  1003. }
  1004. static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
  1005. {
  1006. }
  1007. static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
  1008. {
  1009. return IOMMU_PASID_INVALID;
  1010. }
  1011. #endif /* CONFIG_IOMMU_SVA */
  1012. #endif /* __LINUX_IOMMU_H */