/* arch/powerpc/platforms/powernv/pci.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Support PCI/PCIe on PowerNV platforms
  4. *
  5. * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/pci.h>
  9. #include <linux/delay.h>
  10. #include <linux/string.h>
  11. #include <linux/init.h>
  12. #include <linux/irq.h>
  13. #include <linux/io.h>
  14. #include <linux/msi.h>
  15. #include <linux/iommu.h>
  16. #include <linux/sched/mm.h>
  17. #include <asm/sections.h>
  18. #include <asm/io.h>
  19. #include <asm/pci-bridge.h>
  20. #include <asm/machdep.h>
  21. #include <asm/msi_bitmap.h>
  22. #include <asm/ppc-pci.h>
  23. #include <asm/pnv-pci.h>
  24. #include <asm/opal.h>
  25. #include <asm/iommu.h>
  26. #include <asm/tce.h>
  27. #include <asm/firmware.h>
  28. #include <asm/eeh_event.h>
  29. #include <asm/eeh.h>
  30. #include "powernv.h"
  31. #include "pci.h"
/* Serialises PBCQ tunnel BAR updates in pnv_pci_set_tunnel_bar() */
static DEFINE_MUTEX(tunnel_mutex);
  33. int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
  34. {
  35. struct device_node *node = np;
  36. u32 bdfn;
  37. u64 phbid;
  38. int ret;
  39. ret = of_property_read_u32(np, "reg", &bdfn);
  40. if (ret)
  41. return -ENXIO;
  42. bdfn = ((bdfn & 0x00ffff00) >> 8);
  43. for (node = np; node; node = of_get_parent(node)) {
  44. if (!PCI_DN(node)) {
  45. of_node_put(node);
  46. break;
  47. }
  48. if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
  49. !of_device_is_compatible(node, "ibm,ioda3-phb") &&
  50. !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
  51. of_node_put(node);
  52. continue;
  53. }
  54. ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
  55. if (ret) {
  56. of_node_put(node);
  57. return -ENXIO;
  58. }
  59. if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
  60. *id = PCI_PHB_SLOT_ID(phbid);
  61. else
  62. *id = PCI_SLOT_ID(phbid, bdfn);
  63. return 0;
  64. }
  65. return -ENODEV;
  66. }
  67. EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
  68. int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
  69. {
  70. int64_t rc;
  71. if (!opal_check_token(OPAL_GET_DEVICE_TREE))
  72. return -ENXIO;
  73. rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
  74. if (rc < OPAL_SUCCESS)
  75. return -EIO;
  76. return rc;
  77. }
  78. EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
  79. int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
  80. {
  81. int64_t rc;
  82. if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
  83. return -ENXIO;
  84. rc = opal_pci_get_presence_state(id, (uint64_t)state);
  85. if (rc != OPAL_SUCCESS)
  86. return -EIO;
  87. return 0;
  88. }
  89. EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
  90. int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
  91. {
  92. int64_t rc;
  93. if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
  94. return -ENXIO;
  95. rc = opal_pci_get_power_state(id, (uint64_t)state);
  96. if (rc != OPAL_SUCCESS)
  97. return -EIO;
  98. return 0;
  99. }
  100. EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
/*
 * Ask OPAL to change the power state of slot @id.
 *
 * The firmware call may complete synchronously or asynchronously.
 * Returns 0 on immediate success, 1 when an async completion message was
 * received (and copied into @msg, if non-NULL), or a negative errno.
 */
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	/* The async token identifies the eventual completion message */
	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	/* Firmware went async: block until the completion arrives */
	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	/* Token must be released on every path once acquired */
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
  131. /* Nicely print the contents of the PE State Tables (PEST). */
  132. static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
  133. {
  134. __be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
  135. bool dup = false;
  136. int i;
  137. for (i = 0; i < pest_size; i++) {
  138. __be64 peA = be64_to_cpu(pestA[i]);
  139. __be64 peB = be64_to_cpu(pestB[i]);
  140. if (peA != prevA || peB != prevB) {
  141. if (dup) {
  142. pr_info("PE[..%03x] A/B: as above\n", i-1);
  143. dup = false;
  144. }
  145. prevA = peA;
  146. prevB = peB;
  147. if (peA & PNV_IODA_STOPPED_STATE ||
  148. peB & PNV_IODA_STOPPED_STATE)
  149. pr_info("PE[%03x] A/B: %016llx %016llx\n",
  150. i, peA, peB);
  151. } else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
  152. peB & PNV_IODA_STOPPED_STATE)) {
  153. dup = true;
  154. }
  155. }
  156. }
/*
 * Pretty-print a P7IOC-format PHB diagnostic buffer.  Each register
 * group is printed only when at least one of its fields is non-zero,
 * keeping the log free of empty sections.
 */
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	/* The common header is the start of the chip-specific layout */
	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Finally, the PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}
/*
 * Pretty-print a PHB3-format diagnostic buffer.  Mirrors the P7IOC
 * variant but with the PHB3 register layout (nFir instead of the
 * p7ioc status registers).  Groups print only when non-zero.
 */
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData*)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Finally, the PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}
  320. static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
  321. struct OpalIoPhbErrorCommon *common)
  322. {
  323. struct OpalIoPhb4ErrorData *data;
  324. data = (struct OpalIoPhb4ErrorData*)common;
  325. pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
  326. hose->global_number, be32_to_cpu(common->version));
  327. if (data->brdgCtl)
  328. pr_info("brdgCtl: %08x\n",
  329. be32_to_cpu(data->brdgCtl));
  330. if (data->deviceStatus || data->slotStatus ||
  331. data->linkStatus || data->devCmdStatus ||
  332. data->devSecStatus)
  333. pr_info("RootSts: %08x %08x %08x %08x %08x\n",
  334. be32_to_cpu(data->deviceStatus),
  335. be32_to_cpu(data->slotStatus),
  336. be32_to_cpu(data->linkStatus),
  337. be32_to_cpu(data->devCmdStatus),
  338. be32_to_cpu(data->devSecStatus));
  339. if (data->rootErrorStatus || data->uncorrErrorStatus ||
  340. data->corrErrorStatus)
  341. pr_info("RootErrSts: %08x %08x %08x\n",
  342. be32_to_cpu(data->rootErrorStatus),
  343. be32_to_cpu(data->uncorrErrorStatus),
  344. be32_to_cpu(data->corrErrorStatus));
  345. if (data->tlpHdr1 || data->tlpHdr2 ||
  346. data->tlpHdr3 || data->tlpHdr4)
  347. pr_info("RootErrLog: %08x %08x %08x %08x\n",
  348. be32_to_cpu(data->tlpHdr1),
  349. be32_to_cpu(data->tlpHdr2),
  350. be32_to_cpu(data->tlpHdr3),
  351. be32_to_cpu(data->tlpHdr4));
  352. if (data->sourceId)
  353. pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
  354. if (data->nFir)
  355. pr_info("nFir: %016llx %016llx %016llx\n",
  356. be64_to_cpu(data->nFir),
  357. be64_to_cpu(data->nFirMask),
  358. be64_to_cpu(data->nFirWOF));
  359. if (data->phbPlssr || data->phbCsr)
  360. pr_info("PhbSts: %016llx %016llx\n",
  361. be64_to_cpu(data->phbPlssr),
  362. be64_to_cpu(data->phbCsr));
  363. if (data->lemFir)
  364. pr_info("Lem: %016llx %016llx %016llx\n",
  365. be64_to_cpu(data->lemFir),
  366. be64_to_cpu(data->lemErrorMask),
  367. be64_to_cpu(data->lemWOF));
  368. if (data->phbErrorStatus)
  369. pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
  370. be64_to_cpu(data->phbErrorStatus),
  371. be64_to_cpu(data->phbFirstErrorStatus),
  372. be64_to_cpu(data->phbErrorLog0),
  373. be64_to_cpu(data->phbErrorLog1));
  374. if (data->phbTxeErrorStatus)
  375. pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
  376. be64_to_cpu(data->phbTxeErrorStatus),
  377. be64_to_cpu(data->phbTxeFirstErrorStatus),
  378. be64_to_cpu(data->phbTxeErrorLog0),
  379. be64_to_cpu(data->phbTxeErrorLog1));
  380. if (data->phbRxeArbErrorStatus)
  381. pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
  382. be64_to_cpu(data->phbRxeArbErrorStatus),
  383. be64_to_cpu(data->phbRxeArbFirstErrorStatus),
  384. be64_to_cpu(data->phbRxeArbErrorLog0),
  385. be64_to_cpu(data->phbRxeArbErrorLog1));
  386. if (data->phbRxeMrgErrorStatus)
  387. pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
  388. be64_to_cpu(data->phbRxeMrgErrorStatus),
  389. be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
  390. be64_to_cpu(data->phbRxeMrgErrorLog0),
  391. be64_to_cpu(data->phbRxeMrgErrorLog1));
  392. if (data->phbRxeTceErrorStatus)
  393. pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
  394. be64_to_cpu(data->phbRxeTceErrorStatus),
  395. be64_to_cpu(data->phbRxeTceFirstErrorStatus),
  396. be64_to_cpu(data->phbRxeTceErrorLog0),
  397. be64_to_cpu(data->phbRxeTceErrorLog1));
  398. if (data->phbPblErrorStatus)
  399. pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
  400. be64_to_cpu(data->phbPblErrorStatus),
  401. be64_to_cpu(data->phbPblFirstErrorStatus),
  402. be64_to_cpu(data->phbPblErrorLog0),
  403. be64_to_cpu(data->phbPblErrorLog1));
  404. if (data->phbPcieDlpErrorStatus)
  405. pr_info("PcieDlp: %016llx %016llx %016llx\n",
  406. be64_to_cpu(data->phbPcieDlpErrorLog1),
  407. be64_to_cpu(data->phbPcieDlpErrorLog2),
  408. be64_to_cpu(data->phbPcieDlpErrorStatus));
  409. if (data->phbRegbErrorStatus)
  410. pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
  411. be64_to_cpu(data->phbRegbErrorStatus),
  412. be64_to_cpu(data->phbRegbFirstErrorStatus),
  413. be64_to_cpu(data->phbRegbErrorLog0),
  414. be64_to_cpu(data->phbRegbErrorLog1));
  415. pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
  416. }
  417. void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
  418. unsigned char *log_buff)
  419. {
  420. struct OpalIoPhbErrorCommon *common;
  421. if (!hose || !log_buff)
  422. return;
  423. common = (struct OpalIoPhbErrorCommon *)log_buff;
  424. switch (be32_to_cpu(common->ioType)) {
  425. case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
  426. pnv_pci_dump_p7ioc_diag_data(hose, common);
  427. break;
  428. case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
  429. pnv_pci_dump_phb3_diag_data(hose, common);
  430. break;
  431. case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
  432. pnv_pci_dump_phb4_diag_data(hose, common);
  433. break;
  434. default:
  435. pr_warn("%s: Unrecognized ioType %d\n",
  436. __func__, be32_to_cpu(common->ioType));
  437. }
  438. }
  439. static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
  440. {
  441. unsigned long flags, rc;
  442. int has_diag, ret = 0;
  443. spin_lock_irqsave(&phb->lock, flags);
  444. /* Fetch PHB diag-data */
  445. rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
  446. phb->diag_data_size);
  447. has_diag = (rc == OPAL_SUCCESS);
  448. /* If PHB supports compound PE, to handle it */
  449. if (phb->unfreeze_pe) {
  450. ret = phb->unfreeze_pe(phb,
  451. pe_no,
  452. OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
  453. } else {
  454. rc = opal_pci_eeh_freeze_clear(phb->opal_id,
  455. pe_no,
  456. OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
  457. if (rc) {
  458. pr_warn("%s: Failure %ld clearing frozen "
  459. "PHB#%x-PE#%x\n",
  460. __func__, rc, phb->hose->global_number,
  461. pe_no);
  462. ret = -EIO;
  463. }
  464. }
  465. /*
  466. * For now, let's only display the diag buffer when we fail to clear
  467. * the EEH status. We'll do more sensible things later when we have
  468. * proper EEH support. We need to make sure we don't pollute ourselves
  469. * with the normal errors generated when probing empty slots
  470. */
  471. if (has_diag && ret)
  472. pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
  473. spin_unlock_irqrestore(&phb->lock, flags);
  474. }
/*
 * After a config space access, check whether the device's PE has been
 * frozen by hardware and, if so, kick off freeze handling.  Used on
 * PHBs where full EEH is not driving recovery.
 */
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not
	 * setup that yet. So all ER errors should be mapped to
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

	/*
	 * Fetch frozen state. If the PHB support compound PE,
	 * we need handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If PHB supports compound PE, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
  524. int pnv_pci_cfg_read(struct pci_dn *pdn,
  525. int where, int size, u32 *val)
  526. {
  527. struct pnv_phb *phb = pdn->phb->private_data;
  528. u32 bdfn = (pdn->busno << 8) | pdn->devfn;
  529. s64 rc;
  530. switch (size) {
  531. case 1: {
  532. u8 v8;
  533. rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
  534. *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
  535. break;
  536. }
  537. case 2: {
  538. __be16 v16;
  539. rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
  540. &v16);
  541. *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
  542. break;
  543. }
  544. case 4: {
  545. __be32 v32;
  546. rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
  547. *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
  548. break;
  549. }
  550. default:
  551. return PCIBIOS_FUNC_NOT_SUPPORTED;
  552. }
  553. pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
  554. __func__, pdn->busno, pdn->devfn, where, size, *val);
  555. return PCIBIOS_SUCCESSFUL;
  556. }
  557. int pnv_pci_cfg_write(struct pci_dn *pdn,
  558. int where, int size, u32 val)
  559. {
  560. struct pnv_phb *phb = pdn->phb->private_data;
  561. u32 bdfn = (pdn->busno << 8) | pdn->devfn;
  562. pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
  563. __func__, pdn->busno, pdn->devfn, where, size, val);
  564. switch (size) {
  565. case 1:
  566. opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
  567. break;
  568. case 2:
  569. opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
  570. break;
  571. case 4:
  572. opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
  573. break;
  574. default:
  575. return PCIBIOS_FUNC_NOT_SUPPORTED;
  576. }
  577. return PCIBIOS_SUCCESSFUL;
  578. }
  579. #ifdef CONFIG_EEH
  580. static bool pnv_pci_cfg_check(struct pci_dn *pdn)
  581. {
  582. struct eeh_dev *edev = NULL;
  583. struct pnv_phb *phb = pdn->phb->private_data;
  584. /* EEH not enabled ? */
  585. if (!(phb->flags & PNV_PHB_FLAG_EEH))
  586. return true;
  587. /* PE reset or device removed ? */
  588. edev = pdn->edev;
  589. if (edev) {
  590. if (edev->pe &&
  591. (edev->pe->state & EEH_PE_CFG_BLOCKED))
  592. return false;
  593. if (edev->mode & EEH_DEV_REMOVED)
  594. return false;
  595. }
  596. return true;
  597. }
  598. #else
  599. static inline pnv_pci_cfg_check(struct pci_dn *pdn)
  600. {
  601. return true;
  602. }
  603. #endif /* CONFIG_EEH */
  604. static int pnv_pci_read_config(struct pci_bus *bus,
  605. unsigned int devfn,
  606. int where, int size, u32 *val)
  607. {
  608. struct pci_dn *pdn;
  609. struct pnv_phb *phb;
  610. int ret;
  611. *val = 0xFFFFFFFF;
  612. pdn = pci_get_pdn_by_devfn(bus, devfn);
  613. if (!pdn)
  614. return PCIBIOS_DEVICE_NOT_FOUND;
  615. if (!pnv_pci_cfg_check(pdn))
  616. return PCIBIOS_DEVICE_NOT_FOUND;
  617. ret = pnv_pci_cfg_read(pdn, where, size, val);
  618. phb = pdn->phb->private_data;
  619. if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
  620. if (*val == EEH_IO_ERROR_VALUE(size) &&
  621. eeh_dev_check_failure(pdn->edev))
  622. return PCIBIOS_DEVICE_NOT_FOUND;
  623. } else {
  624. pnv_pci_config_check_eeh(pdn);
  625. }
  626. return ret;
  627. }
  628. static int pnv_pci_write_config(struct pci_bus *bus,
  629. unsigned int devfn,
  630. int where, int size, u32 val)
  631. {
  632. struct pci_dn *pdn;
  633. struct pnv_phb *phb;
  634. int ret;
  635. pdn = pci_get_pdn_by_devfn(bus, devfn);
  636. if (!pdn)
  637. return PCIBIOS_DEVICE_NOT_FOUND;
  638. if (!pnv_pci_cfg_check(pdn))
  639. return PCIBIOS_DEVICE_NOT_FOUND;
  640. ret = pnv_pci_cfg_write(pdn, where, size, val);
  641. phb = pdn->phb->private_data;
  642. if (!(phb->flags & PNV_PHB_FLAG_EEH))
  643. pnv_pci_config_check_eeh(pdn);
  644. return ret;
  645. }
/* Config space accessors installed for every PowerNV PHB */
struct pci_ops pnv_pci_ops = {
	.read = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
  650. struct iommu_table *pnv_pci_table_alloc(int nid)
  651. {
  652. struct iommu_table *tbl;
  653. tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
  654. if (!tbl)
  655. return NULL;
  656. INIT_LIST_HEAD_RCU(&tbl->it_group_list);
  657. kref_init(&tbl->it_kref);
  658. return tbl;
  659. }
  660. struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
  661. {
  662. struct pci_controller *hose = pci_bus_to_host(dev->bus);
  663. return of_node_get(hose->dn);
  664. }
  665. EXPORT_SYMBOL(pnv_pci_get_phb_node);
/*
 * Claim (@enable != 0) or release (@enable == 0) the PBCQ tunnel BAR of
 * @dev's PHB for address @addr.  At most one device per PHB may own the
 * BAR; ownership is first-come, first-served, and only the owner (same
 * @addr) may release it.  Returns 0 on success or a negative errno.
 */
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	u64 tunnel_bar;
	__be64 val;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	/* tunnel_mutex makes the read-modify-write below atomic */
	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use atomics.
		 * Our policy is first-come, first-served.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting same address twice is ok */
			goto out;
		}
	} else {
		/*
		 * The device that owns atomics and wants to release
		 * them must pass the same address with enable == 0.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
  713. void pnv_pci_shutdown(void)
  714. {
  715. struct pci_controller *hose;
  716. list_for_each_entry(hose, &hose_list, list_node)
  717. if (hose->controller_ops.shutdown)
  718. hose->controller_ops.shutdown(hose);
  719. }
/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	/* Force the standard PCI-to-PCI bridge class code */
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
/*
 * Platform PCI bring-up: probe every known PHB flavour from the device
 * tree and install the IOMMU DMA ops.  A no-op without OPAL firmware.
 */
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, eg. in sim, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

#ifdef CONFIG_PCIEPORTBUS
	/*
	 * On PowerNV PCIe devices are (currently) managed in cooperation
	 * with firmware. This isn't *strictly* required, but there's enough
	 * assumptions baked into both firmware and the platform code that
	 * it's unwise to allow the portbus services to be used.
	 *
	 * We need to fix this eventually, but for now set this flag to disable
	 * the portbus driver. The AER service isn't required since that AER
	 * events are handled via EEH. The pciehp hotplug driver can't work
	 * without kernel changes (and portbus binding breaks pnv_php). The
	 * other services also require some thinking about how we're going
	 * to integrate them.
	 */
	pcie_ports_disabled = true;
#endif

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}
  765. static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
  766. unsigned long action, void *data)
  767. {
  768. struct device *dev = data;
  769. switch (action) {
  770. case BUS_NOTIFY_DEL_DEVICE:
  771. iommu_del_device(dev);
  772. return 0;
  773. default:
  774. return 0;
  775. }
  776. }
/* Notifier registered on the PCI bus type for device removal */
static struct notifier_block pnv_tce_iommu_bus_nb = {
	.notifier_call = pnv_tce_iommu_bus_notifier,
};

/* Register the bus notifier; run late in boot via subsys initcall. */
static int __init pnv_tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);