pci_mmio.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>
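
/* Record a failed MMIO access (condition code, status byte, offset) in the zPCI debug log. */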
static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
        struct {
                u64 offset;
                u8 cc;
                u8 status;
        } data = {offset, cc, status};

        zpci_err_hex(&data, sizeof(data));
}
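
/*
 * Store @len bytes from the user buffer @src to PCI memory at @ioaddr with
 * a single MIO PCI store block (PCISTB) instruction. sacf 256 switches to
 * secondary address space so the storage operand is fetched through the
 * user mappings; sacf 768 switches back. The EX_TABLE entries turn a fault
 * inside the window into an early exit. The condition code is returned and
 * the status byte is extracted into @status.
 */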
static inline int __pcistb_mio_inuser(
                void __iomem *ioaddr, const void __user *src,
                u64 len, u8 *status)
{
        int cc = -ENXIO;

        asm volatile (
                " sacf 256\n"
                "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
                "1: ipm %[cc]\n"
                " srl %[cc],28\n"
                "2: sacf 768\n"
                EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
                : [cc] "+d" (cc), [len] "+d" (len)
                : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
                : "cc", "memory");
        *status = len >> 24 & 0xff;
        return cc;
}
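
/*
 * Store 1..8 bytes from user space with a single MIO PCI store (PCISTG)
 * instruction; used for the chunks that the block store cannot handle.
 */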
static inline int __pcistg_mio_inuser(
                void __iomem *ioaddr, const void __user *src,
                u64 ulen, u8 *status)
{
        union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
        int cc = -ENXIO;
        u64 val = 0;
        u64 cnt = ulen;
        u8 tmp;

        /*
         * copy 0 < @len <= 8 bytes from @src into the rightmost bytes of
         * a register, then store it to PCI at @ioaddr while in secondary
         * address space. pcistg then uses the user mappings.
         */
        asm volatile (
                " sacf 256\n"
                "0: llgc %[tmp],0(%[src])\n"
                "4: sllg %[val],%[val],8\n"
                " aghi %[src],1\n"
                " ogr %[val],%[tmp]\n"
                " brctg %[cnt],0b\n"
                "1: .insn rre,0xb9d40000,%[val],%[ioaddr_len]\n"
                "2: ipm %[cc]\n"
                " srl %[cc],28\n"
                "3: sacf 768\n"
                EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
                :
                [src] "+a" (src), [cnt] "+d" (cnt),
                [val] "+d" (val), [tmp] "=d" (tmp),
                [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
                :: "cc", "memory");
        *status = ioaddr_len.odd >> 24 & 0xff;

        /* did we read everything from user memory? */
        if (!cc && cnt != 0)
                cc = -EFAULT;

        return cc;
}
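
/*
 * Copy @n bytes from the user buffer @src to PCI memory at @dst in the
 * largest chunks that the alignment of source and destination allows:
 * block stores for chunks larger than 8 bytes, single stores otherwise.
 */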
static inline int __memcpy_toio_inuser(void __iomem *dst,
                                       const void __user *src, size_t n)
{
        int size, rc = 0;
        u8 status = 0;

        if (!src)
                return -EINVAL;

        while (n > 0) {
                size = zpci_get_max_write_size((u64 __force) dst,
                                               (u64 __force) src, n,
                                               ZPCI_MAX_WRITE_SIZE);
                if (size > 8) /* main path */
                        rc = __pcistb_mio_inuser(dst, src, size, &status);
                else
                        rc = __pcistg_mio_inuser(dst, src, size, &status);
                if (rc)
                        break;
                src += size;
                dst += size;
                n -= size;
        }
        if (rc)
                zpci_err_mmio(rc, status, (__force u64) dst);
        return rc;
}
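
/*
 * Write @length bytes from @user_buffer to the PCI MMIO address @mmio_addr,
 * which must lie within a single page of a VM_IO/VM_PFNMAP mapping owned by
 * the caller. On MIO-capable systems the data is stored directly from user
 * space; otherwise the page table entry is resolved and zpci_memcpy_toio()
 * is used on the kernel side.
 */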
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
                const void __user *, user_buffer, size_t, length)
{
        u8 local_buf[64];
        void __iomem *io_addr;
        void *buf;
        struct vm_area_struct *vma;
        pte_t *ptep;
        spinlock_t *ptl;
        long ret;

        if (!zpci_is_enabled())
                return -ENODEV;
        if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
                return -EINVAL;

        /*
         * We only support write access to MIO capable devices if we are on
         * a MIO enabled system. Otherwise we would have to check for every
         * address whether it is a special ZPCI_ADDR and would have to do
         * a pfn lookup, which we don't need for MIO capable devices. Currently
         * ISM devices are the only devices without MIO support and there is
         * no known need for accessing these from user space.
         */
        if (static_branch_likely(&have_mio)) {
                ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
                                           user_buffer,
                                           length);
                return ret;
        }

        if (length > 64) {
                buf = kmalloc(length, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
        } else {
                buf = local_buf;
        }

        ret = -EFAULT;
        if (copy_from_user(buf, user_buffer, length))
                goto out_free;

        mmap_read_lock(current->mm);
        ret = -EINVAL;
        vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                goto out_unlock_mmap;
        ret = -EACCES;
        if (!(vma->vm_flags & VM_WRITE))
                goto out_unlock_mmap;

        ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
        if (ret)
                goto out_unlock_mmap;

        io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
                        (mmio_addr & ~PAGE_MASK));
        ret = -EFAULT;
        if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
                goto out_unlock_pt;

        ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
        pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
        mmap_read_unlock(current->mm);
out_free:
        if (buf != local_buf)
                kfree(buf);
        return ret;
}
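
/*
 * Counterpart of __pcistg_mio_inuser() for reads: a single MIO PCI load
 * (PCILG) followed by byte-wise stores to the user buffer.
 */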
static inline int __pcilg_mio_inuser(
                void __user *dst, const void __iomem *ioaddr,
                u64 ulen, u8 *status)
{
        union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
        u64 cnt = ulen;
        int shift = ulen * 8;
        int cc = -ENXIO;
        u64 val, tmp;

        /*
         * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
         * user space) into a register using pcilg then store these bytes at
         * user address @dst
         */
        asm volatile (
                " sacf 256\n"
                "0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
                "1: ipm %[cc]\n"
                " srl %[cc],28\n"
                " ltr %[cc],%[cc]\n"
                " jne 4f\n"
                "2: ahi %[shift],-8\n"
                " srlg %[tmp],%[val],0(%[shift])\n"
                "3: stc %[tmp],0(%[dst])\n"
                "5: aghi %[dst],1\n"
                " brctg %[cnt],2b\n"
                "4: sacf 768\n"
                EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
                :
                [ioaddr_len] "+&d" (ioaddr_len.pair),
                [cc] "+d" (cc), [val] "=d" (val),
                [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
                [shift] "+d" (shift)
                :: "cc", "memory");

        /* did we write everything to the user space buffer? */
        if (!cc && cnt != 0)
                cc = -EFAULT;

        *status = ioaddr_len.odd >> 24 & 0xff;
        return cc;
}
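
/*
 * Copy @n bytes from PCI memory at @src to the user buffer @dst in the
 * largest alignment-permitted chunks, at most ZPCI_MAX_READ_SIZE each.
 */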
static inline int __memcpy_fromio_inuser(void __user *dst,
                                         const void __iomem *src,
                                         unsigned long n)
{
        int size, rc = 0;
        u8 status;

        while (n > 0) {
                size = zpci_get_max_write_size((u64 __force) src,
                                               (u64 __force) dst, n,
                                               ZPCI_MAX_READ_SIZE);
                rc = __pcilg_mio_inuser(dst, src, size, &status);
                if (rc)
                        break;
                src += size;
                dst += size;
                n -= size;
        }
        if (rc)
                zpci_err_mmio(rc, status, (__force u64) dst);
        return rc;
}
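
/*
 * Read @length bytes from the PCI MMIO address @mmio_addr into
 * @user_buffer; the counterpart of s390_pci_mmio_write() with the same
 * mapping requirements.
 */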
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
                void __user *, user_buffer, size_t, length)
{
        u8 local_buf[64];
        void __iomem *io_addr;
        void *buf;
        struct vm_area_struct *vma;
        pte_t *ptep;
        spinlock_t *ptl;
        long ret;

        if (!zpci_is_enabled())
                return -ENODEV;
        if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
                return -EINVAL;

        /*
         * We only support read access to MIO capable devices if we are on
         * a MIO enabled system. Otherwise we would have to check for every
         * address whether it is a special ZPCI_ADDR and would have to do
         * a pfn lookup, which we don't need for MIO capable devices. Currently
         * ISM devices are the only devices without MIO support and there is
         * no known need for accessing these from user space.
         */
        if (static_branch_likely(&have_mio)) {
                ret = __memcpy_fromio_inuser(
                                user_buffer, (const void __iomem *)mmio_addr,
                                length);
                return ret;
        }

        if (length > 64) {
                buf = kmalloc(length, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
        } else {
                buf = local_buf;
        }

        mmap_read_lock(current->mm);
        ret = -EINVAL;
        vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                goto out_unlock_mmap;
        ret = -EACCES;
        if (!(vma->vm_flags & VM_WRITE))
                goto out_unlock_mmap;

        ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
        if (ret)
                goto out_unlock_mmap;

        io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
                        (mmio_addr & ~PAGE_MASK));
        if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
                ret = -EFAULT;
                goto out_unlock_pt;
        }
        ret = zpci_memcpy_fromio(buf, io_addr, length);
out_unlock_pt:
        pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
        mmap_read_unlock(current->mm);

        if (!ret && copy_to_user(user_buffer, buf, length))
                ret = -EFAULT;

        if (buf != local_buf)
                kfree(buf);
        return ret;
}