iommu-sva.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-sva.h"

static DEFINE_MUTEX(iommu_sva_lock);
static DECLARE_IOASID_SET(iommu_sva_pasid);

/**
 * iommu_sva_alloc_pasid - Allocate a PASID for the mm
 * @mm: the mm
 * @min: minimum PASID value (inclusive)
 * @max: maximum PASID value (inclusive)
 *
 * Try to allocate a PASID for this mm, or take a reference to the existing one
 * provided it fits within the [@min, @max] range. On success the PASID is
 * available in mm->pasid and will be available for the lifetime of the mm.
 *
 * Returns 0 on success and < 0 on error.
 */
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
{
	int ret = 0;
	ioasid_t pasid;

	if (min == INVALID_IOASID || max == INVALID_IOASID ||
	    min == 0 || max < min)
		return -EINVAL;

	mutex_lock(&iommu_sva_lock);
	/* Is a PASID already associated with this mm? */
	if (pasid_valid(mm->pasid)) {
		/* @max is inclusive, matching the range passed to ioasid_alloc(). */
		if (mm->pasid < min || mm->pasid > max)
			ret = -EOVERFLOW;
		goto out;
	}

	pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
	if (!pasid_valid(pasid))
		ret = -ENOMEM;
	else
		mm_pasid_set(mm, pasid);
out:
	mutex_unlock(&iommu_sva_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
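
/*
 * Editorial example, not part of the original file: a minimal sketch of a
 * caller using iommu_sva_alloc_pasid(), mirroring what
 * iommu_sva_bind_device() below does internally. The function name and the
 * 8-bit PASID width are assumptions chosen for illustration.
 */
static int example_alloc_pasid_for_mm(struct mm_struct *mm)
{
	/* PASID 0 is reserved, so allocate from the inclusive range [1, 255]. */
	int ret = iommu_sva_alloc_pasid(mm, 1, (1U << 8) - 1);

	if (ret)
		return ret;	/* -EINVAL, -EOVERFLOW or -ENOMEM */

	/* On success the PASID lives in mm->pasid for the lifetime of the mm. */
	pr_debug("mm assigned PASID %u\n", mm->pasid);
	return 0;
}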

/* ioasid_find getter() requires a void * argument */
static bool __mmget_not_zero(void *mm)
{
	return mmget_not_zero(mm);
}

/**
 * iommu_sva_find() - Find mm associated to the given PASID
 * @pasid: Process Address Space ID assigned to the mm
 *
 * On success a reference to the mm is taken, and must be released with mmput().
 *
 * Returns the mm corresponding to this PASID, or an error if not found.
 */
struct mm_struct *iommu_sva_find(ioasid_t pasid)
{
	return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
}
EXPORT_SYMBOL_GPL(iommu_sva_find);
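
/*
 * Editorial example, not part of the original file: a sketch of resolving a
 * PASID (e.g. one taken from a fault record) back to its mm. The function
 * name and calling context are hypothetical. Note that ioasid_find() can
 * return either an ERR_PTR (PASID not allocated) or NULL (the getter,
 * mmget_not_zero(), failed), so both cases must be checked.
 */
static void example_find_mm(ioasid_t pasid)
{
	struct mm_struct *mm = iommu_sva_find(pasid);

	if (IS_ERR_OR_NULL(mm))
		return;

	/* ... operate on the address space ... */

	/* iommu_sva_find() took a reference on mm_users; drop it when done. */
	mmput(mm);
}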

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. The caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	ioasid_t max_pasids;
	int ret;

	max_pasids = dev->iommu->max_pasids;
	if (!max_pasids)
		return ERR_PTR(-EOPNOTSUPP);

	/* Allocate mm->pasid if necessary. */
	ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
	if (ret)
		return ERR_PTR(ret);

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&iommu_sva_lock);
	/* Search for an existing domain. */
	domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,
						IOMMU_DOMAIN_SVA);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_unlock;
	}

	if (domain) {
		domain->users++;
		goto out;
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
out:
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	handle->domain = domain;

	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;
	ioasid_t pasid = domain->mm->pasid;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (--domain->users == 0) {
		iommu_detach_device_pasid(domain, dev, pasid);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;

	return domain->mm->pasid;
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
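
/*
 * Editorial example, not part of the original file: a sketch of the full
 * bind/unbind lifecycle as a device driver might use it, exercising
 * iommu_sva_bind_device(), iommu_sva_get_pasid() and
 * iommu_sva_unbind_device() above. The function name and the placement of
 * the DMA work are assumptions for illustration.
 */
static int example_sva_lifecycle(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;
	int ret;

	/* SVA features must be enabled before the first bind. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		return ret;

	/* Bind the current process's address space to the device. */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto err_disable;
	}

	pasid = iommu_sva_get_pasid(handle);
	/* ... program @pasid into the device and issue DMA ... */

	/* Quiesce the device for this PASID, then drop the bond. */
	iommu_sva_unbind_device(handle);
err_disable:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return ret;
}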

/*
 * I/O page fault handler for SVA
 */
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm = data;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
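
/*
 * Editorial example, not part of the original file: a sketch of the fault
 * layout iommu_sva_handle_iopf() expects. In the kernel proper this handler
 * is wired up as the SVA domain's iopf handler (with the mm as fault_data)
 * and faults arrive through the iopf queue rather than by direct call; the
 * direct invocation and the function name here are illustrative assumptions.
 */
static enum iommu_page_response_code
example_handle_read_fault(struct mm_struct *mm, u64 addr, u32 pasid)
{
	struct iommu_fault fault = {
		.type = IOMMU_FAULT_PAGE_REQ,
		.prm = {
			/* Without PASID_VALID the handler bails out early. */
			.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID,
			.pasid = pasid,
			.perm = IOMMU_FAULT_PERM_READ,
			.addr = addr,
		},
	};

	return iommu_sva_handle_iopf(&fault, mm);
}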