pci_insn.c 9.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * s390 specific pci instructions
  4. *
  5. * Copyright IBM Corp. 2013
  6. */
  7. #include <linux/export.h>
  8. #include <linux/errno.h>
  9. #include <linux/delay.h>
  10. #include <linux/jump_label.h>
  11. #include <asm/asm-extable.h>
  12. #include <asm/facility.h>
  13. #include <asm/pci_insn.h>
  14. #include <asm/pci_debug.h>
  15. #include <asm/pci_io.h>
  16. #include <asm/processor.h>
  17. #define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
/*
 * Binary record handed to zpci_err_hex_level() when a PCI instruction
 * fails or is retried: the instruction tag character, its condition code
 * and status byte, followed by either the request/offset pair (function
 * handle based instructions) or the address/length pair (MIO instructions).
 * __packed so the hex-dumped record contains no padding holes.
 */
struct zpci_err_insn_data {
	u8 insn;	/* instruction tag, e.g. 'M', 'R', 'l', 's', 'b' */
	u8 cc;		/* condition code returned by the instruction */
	u8 status;	/* status byte extracted from the request register */
	union {
		struct {
			u64 req;
			u64 offset;
		};
		struct {
			u64 addr;
			u64 len;
		};
	};
} __packed;
  33. static inline void zpci_err_insn_req(int lvl, u8 insn, u8 cc, u8 status,
  34. u64 req, u64 offset)
  35. {
  36. struct zpci_err_insn_data data = {
  37. .insn = insn, .cc = cc, .status = status,
  38. .req = req, .offset = offset};
  39. zpci_err_hex_level(lvl, &data, sizeof(data));
  40. }
  41. static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status,
  42. u64 addr, u64 len)
  43. {
  44. struct zpci_err_insn_data data = {
  45. .insn = insn, .cc = cc, .status = status,
  46. .addr = addr, .len = len};
  47. zpci_err_hex_level(lvl, &data, sizeof(data));
  48. }
/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	/*
	 * Issue MPCIFC (opcode 0xe3..d0) on @req/@fib, then extract the
	 * condition code from the PSW via ipm/srl. The instruction updates
	 * the request register; the status byte it stores there is returned
	 * through *status.
	 */
	asm volatile (
		" .insn rxy,0xe300000000d0,%[req],%[fib]\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
		: : "cc");
	*status = req >> 24 & 0xff;
	return cc;
}
  62. u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
  63. {
  64. bool retried = false;
  65. u8 cc;
  66. do {
  67. cc = __mpcifc(req, fib, status);
  68. if (cc == 2) {
  69. msleep(ZPCI_INSN_BUSY_DELAY);
  70. if (!retried) {
  71. zpci_err_insn_req(1, 'M', cc, *status, req, 0);
  72. retried = true;
  73. }
  74. }
  75. } while (cc == 2);
  76. if (cc)
  77. zpci_err_insn_req(0, 'M', cc, *status, req, 0);
  78. else if (retried)
  79. zpci_err_insn_req(1, 'M', cc, *status, req, 0);
  80. return cc;
  81. }
  82. EXPORT_SYMBOL_GPL(zpci_mod_fc);
/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
	/* RPCIT takes addr/range as an even/odd register pair */
	union register_pair addr_range = {.even = addr, .odd = range};
	u8 cc;

	/*
	 * Issue RPCIT (opcode 0xb9d3) and extract the condition code from
	 * the PSW via ipm/srl. The status byte the instruction stores in
	 * the fn register is returned through *status.
	 */
	asm volatile (
		" .insn rre,0xb9d30000,%[fn],%[addr_range]\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=d" (cc), [fn] "+d" (fn)
		: [addr_range] "d" (addr_range.pair)
		: "cc");
	*status = fn >> 24 & 0xff;
	return cc;
}
  98. int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
  99. {
  100. bool retried = false;
  101. u8 cc, status;
  102. do {
  103. cc = __rpcit(fn, addr, range, &status);
  104. if (cc == 2) {
  105. udelay(ZPCI_INSN_BUSY_DELAY);
  106. if (!retried) {
  107. zpci_err_insn_addr(1, 'R', cc, status, addr, range);
  108. retried = true;
  109. }
  110. }
  111. } while (cc == 2);
  112. if (cc)
  113. zpci_err_insn_addr(0, 'R', cc, status, addr, range);
  114. else if (retried)
  115. zpci_err_insn_addr(1, 'R', cc, status, addr, range);
  116. if (cc == 1 && (status == 4 || status == 16))
  117. return -ENOMEM;
  118. return (cc) ? -EIO : 0;
  119. }
/* Set Interruption Controls */
int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
	/* SIC requires facility 72 (PCI interpretation facility bit) */
	if (!test_facility(72))
		return -EIO;

	/*
	 * Issue SIC (opcode 0xeb..d1); the interruption subclass is
	 * shifted left by 27 into the position the instruction expects.
	 */
	asm volatile(
		".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));
	return 0;
}
EXPORT_SYMBOL_GPL(zpci_set_irq_ctrl);
/* PCI Load */
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	/* PCILG takes req/offset as an even/odd register pair */
	union register_pair req_off = {.even = req, .odd = offset};
	/*
	 * Preset cc to -ENXIO: if the instruction faults, the exception
	 * table entry jumps past the ipm/srl sequence and -ENXIO is
	 * returned unchanged.
	 */
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		" .insn rre,0xb9d20000,%[data],%[req_off]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		[req_off] "+&d" (req_off.pair) :: "cc");
	/* status byte stored back into the request (even) register */
	*status = req_off.even >> 24 & 0xff;
	/* note: *data is written unconditionally, even on failure */
	*data = __data;
	return cc;
}
  149. static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
  150. {
  151. u64 __data;
  152. int cc;
  153. cc = ____pcilg(&__data, req, offset, status);
  154. if (!cc)
  155. *data = __data;
  156. return cc;
  157. }
  158. int __zpci_load(u64 *data, u64 req, u64 offset)
  159. {
  160. bool retried = false;
  161. u8 status;
  162. int cc;
  163. do {
  164. cc = __pcilg(data, req, offset, &status);
  165. if (cc == 2) {
  166. udelay(ZPCI_INSN_BUSY_DELAY);
  167. if (!retried) {
  168. zpci_err_insn_req(1, 'l', cc, status, req, offset);
  169. retried = true;
  170. }
  171. }
  172. } while (cc == 2);
  173. if (cc)
  174. zpci_err_insn_req(0, 'l', cc, status, req, offset);
  175. else if (retried)
  176. zpci_err_insn_req(1, 'l', cc, status, req, offset);
  177. return (cc > 0) ? -EIO : cc;
  178. }
  179. EXPORT_SYMBOL_GPL(__zpci_load);
/*
 * PCI load via function handle: look up the iomap entry for @addr and
 * issue a PCILG request built from the function handle, BAR and access
 * length. READ_ONCE guards against re-reading a function handle that may
 * be updated concurrently.
 */
static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
			       unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_load(data, req, ZPCI_OFFSET(addr));
}
/* PCI Load with memory-mapped I/O address */
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	/* MIO PCILG takes ioaddr/len as an even/odd register pair */
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	/*
	 * Preset cc to -ENXIO: if the instruction faults, the exception
	 * table entry jumps past the ipm/srl sequence and -ENXIO is
	 * returned unchanged.
	 */
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		" .insn rre,0xb9d60000,%[data],%[ioaddr_len]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		[ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
	/* status byte stored back into the length (odd) register */
	*status = ioaddr_len.odd >> 24 & 0xff;
	*data = __data;
	return cc;
}
  204. int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
  205. {
  206. u8 status;
  207. int cc;
  208. if (!static_branch_unlikely(&have_mio))
  209. return zpci_load_fh(data, addr, len);
  210. cc = __pcilg_mio(data, (__force u64) addr, len, &status);
  211. if (cc)
  212. zpci_err_insn_addr(0, 'L', cc, status, (__force u64) addr, len);
  213. return (cc > 0) ? -EIO : cc;
  214. }
  215. EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
	/* PCISTG takes req/offset as an even/odd register pair */
	union register_pair req_off = {.even = req, .odd = offset};
	/*
	 * Preset cc to -ENXIO: if the instruction faults, the exception
	 * table entry jumps past the ipm/srl sequence and -ENXIO is
	 * returned unchanged.
	 */
	int cc = -ENXIO;

	asm volatile (
		" .insn rre,0xb9d00000,%[data],%[req_off]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
		: [data] "d" (data)
		: "cc");
	/* status byte stored back into the request (even) register */
	*status = req_off.even >> 24 & 0xff;
	return cc;
}
  233. int __zpci_store(u64 data, u64 req, u64 offset)
  234. {
  235. bool retried = false;
  236. u8 status;
  237. int cc;
  238. do {
  239. cc = __pcistg(data, req, offset, &status);
  240. if (cc == 2) {
  241. udelay(ZPCI_INSN_BUSY_DELAY);
  242. if (!retried) {
  243. zpci_err_insn_req(1, 's', cc, status, req, offset);
  244. retried = true;
  245. }
  246. }
  247. } while (cc == 2);
  248. if (cc)
  249. zpci_err_insn_req(0, 's', cc, status, req, offset);
  250. else if (retried)
  251. zpci_err_insn_req(1, 's', cc, status, req, offset);
  252. return (cc > 0) ? -EIO : cc;
  253. }
  254. EXPORT_SYMBOL_GPL(__zpci_store);
/*
 * PCI store via function handle: look up the iomap entry for @addr and
 * issue a PCISTG request built from the function handle, BAR and access
 * length. READ_ONCE guards against re-reading a function handle that may
 * be updated concurrently.
 */
static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
				unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_store(data, req, ZPCI_OFFSET(addr));
}
/* PCI Store with memory-mapped I/O address */
static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
	/* MIO PCISTG takes ioaddr/len as an even/odd register pair */
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	/*
	 * Preset cc to -ENXIO: if the instruction faults, the exception
	 * table entry jumps past the ipm/srl sequence and -ENXIO is
	 * returned unchanged.
	 */
	int cc = -ENXIO;

	asm volatile (
		" .insn rre,0xb9d40000,%[data],%[ioaddr_len]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		: [data] "d" (data)
		: "cc", "memory");	/* "memory": the store must not be reordered */
	/* status byte stored back into the length (odd) register */
	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}
  278. int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
  279. {
  280. u8 status;
  281. int cc;
  282. if (!static_branch_unlikely(&have_mio))
  283. return zpci_store_fh(addr, data, len);
  284. cc = __pcistg_mio(data, (__force u64) addr, len, &status);
  285. if (cc)
  286. zpci_err_insn_addr(0, 'S', cc, status, (__force u64) addr, len);
  287. return (cc > 0) ? -EIO : cc;
  288. }
  289. EXPORT_SYMBOL_GPL(zpci_store);
/* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
	/*
	 * Preset cc to -ENXIO: if the instruction faults, the exception
	 * table entry jumps past the ipm/srl sequence and -ENXIO is
	 * returned unchanged.
	 */
	int cc = -ENXIO;

	asm volatile (
		" .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (req)
		: [offset] "d" (offset), [data] "Q" (*data)
		: "cc");
	/* status byte stored back into the request register */
	*status = req >> 24 & 0xff;
	return cc;
}
  306. int __zpci_store_block(const u64 *data, u64 req, u64 offset)
  307. {
  308. bool retried = false;
  309. u8 status;
  310. int cc;
  311. do {
  312. cc = __pcistb(data, req, offset, &status);
  313. if (cc == 2) {
  314. udelay(ZPCI_INSN_BUSY_DELAY);
  315. if (!retried) {
  316. zpci_err_insn_req(0, 'b', cc, status, req, offset);
  317. retried = true;
  318. }
  319. }
  320. } while (cc == 2);
  321. if (cc)
  322. zpci_err_insn_req(0, 'b', cc, status, req, offset);
  323. else if (retried)
  324. zpci_err_insn_req(1, 'b', cc, status, req, offset);
  325. return (cc > 0) ? -EIO : cc;
  326. }
  327. EXPORT_SYMBOL_GPL(__zpci_store_block);
  328. static inline int zpci_write_block_fh(volatile void __iomem *dst,
  329. const void *src, unsigned long len)
  330. {
  331. struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
  332. u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
  333. u64 offset = ZPCI_OFFSET(dst);
  334. return __zpci_store_block(src, req, offset);
  335. }
/* PCI Store Block with memory-mapped I/O address */
static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	/*
	 * Preset cc to -ENXIO: if the instruction faults, the exception
	 * table entry jumps past the ipm/srl sequence and -ENXIO is
	 * returned unchanged.
	 */
	int cc = -ENXIO;

	asm volatile (
		" .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
		: "cc");
	/* status byte stored back into the length register */
	*status = len >> 24 & 0xff;
	return cc;
}
  351. int zpci_write_block(volatile void __iomem *dst,
  352. const void *src, unsigned long len)
  353. {
  354. u8 status;
  355. int cc;
  356. if (!static_branch_unlikely(&have_mio))
  357. return zpci_write_block_fh(dst, src, len);
  358. cc = __pcistb_mio(src, (__force u64) dst, len, &status);
  359. if (cc)
  360. zpci_err_insn_addr(0, 'B', cc, status, (__force u64) dst, len);
  361. return (cc > 0) ? -EIO : cc;
  362. }
  363. EXPORT_SYMBOL_GPL(zpci_write_block);
/* PCI write barrier: issue PCIWB (opcode 0xb9d5) with both fields zero */
static inline void __pciwb_mio(void)
{
	asm volatile (".insn rre,0xb9d50000,0,0\n");
}
  368. void zpci_barrier(void)
  369. {
  370. if (static_branch_likely(&have_mio))
  371. __pciwb_mio();
  372. }
  373. EXPORT_SYMBOL_GPL(zpci_barrier);