// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <[email protected]>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);
DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);

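/*
 * Look up a zpci_dev by its function ID. On success a reference is taken
 * via zpci_zdev_get() which the caller must drop with zpci_zdev_put().
 */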
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

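/*
 * Walk zpci_list and hand devices that sit in the standby state, but whose
 * function has meanwhile been reserved by the platform, over to
 * zpci_device_reserved() for removal.
 */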
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}
EXPORT_SYMBOL_GPL(zpci_register_ioat);

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	fib.gd = zdev->gisa;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

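/* Read from a function's PCI config space via the PCI load instruction */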
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

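/*
 * Perform the actual mapping for the ioremap family. Without MIO support
 * the address is returned unchanged since all PCI access goes through
 * special instructions; with MIO the range is mapped via the vmalloc area.
 */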
static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	phys_addr_t last_addr;

	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) addr;

	offset = addr & ~PAGE_MASK;
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = zdev_from_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

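/*
 * Populate the BAR resources of a PCI function: with MIO the resource
 * start is the write-through MIO address, otherwise it is an address
 * cookie from the fh-based iomap scheme.
 */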
static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

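/* Allocate a free entry in the global iomap table used for fh-based access */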
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static void zpci_do_update_iomap_fh(struct zpci_dev *zdev, u32 fh)
{
	int bar, idx;

	spin_lock(&zpci_iomap_lock);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!zdev->bars[bar].size)
			continue;
		idx = zdev->bars[bar].map_idx;
		if (!zpci_iomap_start[idx].count)
			continue;
		WRITE_ONCE(zpci_iomap_start[idx].fh, zdev->fh);
	}
	spin_unlock(&zpci_iomap_lock);
}

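/*
 * Update the cached function handle, e.g. after an enable, disable or
 * availability event. For fh-based access the iomap entries of an enabled
 * device are refreshed as well since they embed the handle.
 */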
void zpci_update_fh(struct zpci_dev *zdev, u32 fh)
{
	if (!fh || zdev->fh == fh)
		return;

	zdev->fh = fh;
	if (zpci_use_mio(zdev))
		return;
	if (zdev->has_resources && zdev_enabled(zdev))
		zpci_do_update_iomap_fh(zdev, fh);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

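/*
 * Request an iomem resource and allocate an iomap entry for each BAR of
 * the function.
 */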
int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

int pcibios_device_add(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

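/* Enable the PCI function via CLP and record the returned function handle */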
int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zpci_update_fh(zdev, fh);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zpci_update_fh(zdev, fh);
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zpci_update_fh(zdev, fh);
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the function's internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently, after reset the PCI function requires re-initialization via
 * the common PCI code, including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * locking.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	int rc;

	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state the
		 * FH may indicate an enabled device but disable says the
		 * device is already disabled; don't treat this as an error
		 * here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_enable_device(zdev);
	if (rc)
		return rc;

	if (zdev->dma_table)
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
					virt_to_phys(zdev->dma_table));
	else
		rc = zpci_dma_init_device(zdev);
	if (rc) {
		zpci_disable_device(zdev);
		return rc;
	}

	return 0;
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->lock);
	mutex_init(&zdev->kzdev_lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
		state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If the PCI device is
 * parked because we cannot yet create a PCI bus because we have not seen
 * function 0, it is ignored but will be scanned once function 0 appears.
 * If any failure occurs, the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	int rc;

	zpci_update_fh(zdev, fh);
	/* the PCI function will be scanned once function 0 appears */
	if (!zdev->zbus->bus)
		return 0;

	/*
	 * For function 0 on a multi-function bus scan the whole bus as we
	 * might have to pick up existing functions waiting for it to allow
	 * creating the PCI bus.
	 */
	if (zdev->devfn == 0 && zdev->zbus->multifunction)
		rc = zpci_bus_scan_bus(zdev->zbus);
	else
		rc = zpci_bus_scan_device(zdev);

	return rc;
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table) {
		rc = zpci_dma_exit_device(zdev);
		if (rc)
			return rc;
	}
	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can no longer be found via
 * get_zdev_by_fid() but may still be accessible via existing references,
 * though it will not be functional anymore.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

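/*
 * Final kref release: tear down whatever is left for the device's current
 * state, falling through the states as in the deconfigure/reserve paths,
 * and free the zpci_dev itself.
 */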
void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
	int ret;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table)
		zpci_dma_exit_device(zdev);
	if (zdev_enabled(zdev))
		zpci_disable_device(zdev);

	switch (zdev->state) {
	case ZPCI_FN_STATE_CONFIGURED:
		ret = sclp_pci_deconfigure(zdev->fid);
		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		spin_lock(&zpci_list_lock);
		list_del(&zdev->entry);
		spin_unlock(&zpci_list_lock);
		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
		fallthrough;
	case ZPCI_FN_STATE_RESERVED:
		if (zdev->has_resources)
			zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}

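/* Allocate the FMB cache and the iomap table/bitmap for fh-based access */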
static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

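/*
 * Handle the options of the "pci=" kernel command line parameter,
 * e.g. "pci=nomio,norid".
 */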
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

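/*
 * Base initialization: check the required facilities, enable MIO support
 * if available and bring up debugging, memory, IRQ and DMA handling before
 * scanning for PCI functions via CLP.
 */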
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;
	zpci_bus_scan_busses();

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);