efa_main.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <rdma/ib_user_verbs.h>

#include "efa.h"

#define PCI_DEV_ID_EFA0_VF 0xefa0
#define PCI_DEV_ID_EFA1_VF 0xefa1
#define PCI_DEV_ID_EFA2_VF 0xefa2

static const struct pci_device_id efa_pci_tbl[] = {
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) },
	{ }
};

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_DEVICE_TABLE(pci, efa_pci_tbl);

#define EFA_REG_BAR 0
#define EFA_MEM_BAR 2
#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
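/*
 * AENQ (asynchronous event notification queue) dispatch: the device posts
 * events such as keep-alives on the AENQ, and the handler table below maps
 * each event group to a callback. Groups without a dedicated handler fall
 * through to unimplemented_aenq_handler().
 */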
/* This handler will be called for an unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	ibdev_err(&dev->ibdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	atomic64_inc(&dev->stats.keep_alive_rcvd);
}

static struct efa_aenq_handlers aenq_handlers = {
	.handlers = {
		[EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
static void efa_release_bars(struct efa_dev *dev, int bars_mask)
{
	struct pci_dev *pdev = dev->pdev;
	int release_bars;

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
	pci_release_selected_regions(pdev, release_bars);
}

static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe)
{
	u16 cqn = eqe->u.comp_event.cqn;
	struct efa_cq *cq;

	/* Safe to load as we're in irq and removal calls synchronize_irq() */
	cq = xa_load(&dev->cqs_xa, cqn);
	if (unlikely(!cq)) {
		ibdev_err_ratelimited(&dev->ibdev,
				      "Completion event on non-existent CQ[%u]",
				      cqn);
		return;
	}

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe)
{
	struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev);

	if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) ==
		   EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION))
		efa_process_comp_eqe(dev, eqe);
	else
		ibdev_err_ratelimited(&dev->ibdev,
				      "Unknown event type received %lu",
				      EFA_GET(&eqe->common,
					      EFA_ADMIN_EQE_EVENT_TYPE));
}

static irqreturn_t efa_intr_msix_comp(int irq, void *data)
{
	struct efa_eq *eq = data;
	struct efa_com_dev *edev = eq->eeq.edev;

	efa_com_eq_comp_intr_handler(edev, &eq->eeq);

	return IRQ_HANDLED;
}

static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
{
	struct efa_dev *dev = data;

	efa_com_admin_q_comp_intr_handler(&dev->edev);
	efa_com_aenq_intr_handler(&dev->edev, data);

	return IRQ_HANDLED;
}
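/*
 * IRQ plumbing: each efa_irq carries its MSI-X vector, Linux IRQ number,
 * handler, and an affinity hint mask. efa_request_irq() registers the
 * handler and publishes the affinity hint; efa_free_irq() undoes both.
 */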
static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
{
	int err;

	err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
	if (err) {
		dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n",
			irq->name, err);
		return err;
	}

	irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);

	return 0;
}

static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
			       int vector)
{
	u32 cpu;

	cpu = vector - EFA_COMP_EQS_VEC_BASE;
	snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
		 pci_name(dev->pdev));
	eq->irq.handler = efa_intr_msix_comp;
	eq->irq.data = eq;
	eq->irq.vector = vector;
	eq->irq.irqn = pci_irq_vector(dev->pdev, vector);
	cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
}
static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq)
{
	irq_set_affinity_hint(irq->irqn, NULL);
	free_irq(irq->irqn, irq->data);
}

static void efa_setup_mgmnt_irq(struct efa_dev *dev)
{
	u32 cpu;

	snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
		 "efa-mgmnt@pci:%s", pci_name(dev->pdev));
	dev->admin_irq.handler = efa_intr_msix_mgmnt;
	dev->admin_irq.data = dev;
	dev->admin_irq.vector = dev->admin_msix_vector_idx;
	dev->admin_irq.irqn = pci_irq_vector(dev->pdev,
					     dev->admin_msix_vector_idx);
	cpu = cpumask_first(cpu_online_mask);
	cpumask_set_cpu(cpu,
			&dev->admin_irq.affinity_hint_mask);
	dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
		 dev->admin_irq.irqn,
		 dev->admin_irq.name);
}

static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
	efa_setup_mgmnt_irq(dev);

	return efa_request_irq(dev, &dev->admin_irq);
}
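/*
 * The doorbell BAR index is reported by the device. If it is one of the
 * BARs already claimed at probe time (EFA_BASE_BAR_MASK), only its
 * address/length are recorded here; otherwise the region must be
 * requested (and later released) separately.
 */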
static int efa_request_doorbell_bar(struct efa_dev *dev)
{
	u8 db_bar_idx = dev->dev_attr.db_bar;
	struct pci_dev *pdev = dev->pdev;
	int bars;
	int err;

	if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);

		err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
		if (err) {
			dev_err(&dev->pdev->dev,
				"pci_request_selected_regions for bar %d failed %d\n",
				db_bar_idx, err);
			return err;
		}
	}

	dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
	dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);

	return 0;
}

static void efa_release_doorbell_bar(struct efa_dev *dev)
{
	if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
		efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
}
static void efa_update_hw_hints(struct efa_dev *dev,
				struct efa_com_get_hw_hints_result *hw_hints)
{
	struct efa_com_dev *edev = &dev->edev;

	if (hw_hints->mmio_read_timeout)
		edev->mmio_read.mmio_read_timeout =
			hw_hints->mmio_read_timeout * 1000;
	if (hw_hints->poll_interval)
		edev->aq.poll_interval = hw_hints->poll_interval;
	if (hw_hints->admin_completion_timeout)
		edev->aq.completion_timeout =
			hw_hints->admin_completion_timeout;
}

static void efa_stats_init(struct efa_dev *dev)
{
	atomic64_t *s = (atomic64_t *)&dev->stats;
	int i;

	for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}
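/*
 * Report host details (OS, kernel version, driver version, PCI BDF) to
 * the device via a set-feature admin command. The buffer is DMA-coherent
 * so the device can read it directly; failures are deliberately ignored
 * so they cannot fail the probe.
 */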
static void efa_set_host_info(struct efa_dev *dev)
{
	struct efa_admin_set_feature_resp resp = {};
	struct efa_admin_set_feature_cmd cmd = {};
	struct efa_admin_host_info *hinf;
	u32 bufsz = sizeof(*hinf);
	dma_addr_t hinf_dma;

	if (!efa_com_check_supported_feature_id(&dev->edev,
						EFA_ADMIN_HOST_INFO))
		return;

	/* Failures in host info set shall not disturb probe */
	hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
				  GFP_KERNEL);
	if (!hinf)
		return;

	strscpy(hinf->os_dist_str, utsname()->release,
		sizeof(hinf->os_dist_str));
	hinf->os_type = EFA_ADMIN_OS_LINUX;
	strscpy(hinf->kernel_ver_str, utsname()->version,
		sizeof(hinf->kernel_ver_str));
	hinf->kernel_ver = LINUX_VERSION_CODE;
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
		PCI_SLOT(dev->pdev->devfn));
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
		PCI_FUNC(dev->pdev->devfn));
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
		EFA_COMMON_SPEC_VERSION_MAJOR);
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
		EFA_COMMON_SPEC_VERSION_MINOR);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);

	efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
			       hinf_dma, bufsz);

	dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}
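/*
 * Each completion EQ is backed by one MSI-X vector: the IRQ is requested
 * first, then the EQ is created in the device; teardown runs in the
 * reverse order.
 */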
static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
{
	efa_com_eq_destroy(&dev->edev, &eq->eeq);
	efa_free_irq(dev, &eq->irq);
}

static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
{
	int err;

	efa_setup_comp_irq(dev, eq, msix_vec);
	err = efa_request_irq(dev, &eq->irq);
	if (err)
		return err;

	err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe,
			      dev->dev_attr.max_eq_depth, msix_vec);
	if (err)
		goto err_free_comp_irq;

	return 0;

err_free_comp_irq:
	efa_free_irq(dev, &eq->irq);
	return err;
}

static int efa_create_eqs(struct efa_dev *dev)
{
	unsigned int neqs = dev->dev_attr.max_eq;
	int err;
	int i;

	neqs = min_t(unsigned int, neqs, num_online_cpus());
	dev->neqs = neqs;
	dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
	if (!dev->eqs)
		return -ENOMEM;

	for (i = 0; i < neqs; i++) {
		err = efa_create_eq(dev, &dev->eqs[i],
				    i + EFA_COMP_EQS_VEC_BASE);
		if (err)
			goto err_destroy_eqs;
	}

	return 0;

err_destroy_eqs:
	for (i--; i >= 0; i--)
		efa_destroy_eq(dev, &dev->eqs[i]);
	kfree(dev->eqs);

	return err;
}

static void efa_destroy_eqs(struct efa_dev *dev)
{
	int i;

	for (i = 0; i < dev->neqs; i++)
		efa_destroy_eq(dev, &dev->eqs[i]);

	kfree(dev->eqs);
}
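/*
 * Verbs entry points this driver exposes to the RDMA core; the efa_*
 * callbacks are implemented elsewhere in the driver.
 */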
static const struct ib_device_ops efa_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_EFA,
	.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,

	.alloc_hw_port_stats = efa_alloc_hw_port_stats,
	.alloc_hw_device_stats = efa_alloc_hw_device_stats,
	.alloc_pd = efa_alloc_pd,
	.alloc_ucontext = efa_alloc_ucontext,
	.create_cq = efa_create_cq,
	.create_qp = efa_create_qp,
	.create_user_ah = efa_create_ah,
	.dealloc_pd = efa_dealloc_pd,
	.dealloc_ucontext = efa_dealloc_ucontext,
	.dereg_mr = efa_dereg_mr,
	.destroy_ah = efa_destroy_ah,
	.destroy_cq = efa_destroy_cq,
	.destroy_qp = efa_destroy_qp,
	.get_hw_stats = efa_get_hw_stats,
	.get_link_layer = efa_port_link_layer,
	.get_port_immutable = efa_get_port_immutable,
	.mmap = efa_mmap,
	.mmap_free = efa_mmap_free,
	.modify_qp = efa_modify_qp,
	.query_device = efa_query_device,
	.query_gid = efa_query_gid,
	.query_pkey = efa_query_pkey,
	.query_port = efa_query_port,
	.query_qp = efa_query_qp,
	.reg_user_mr = efa_reg_mr,
	.reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,

	INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};
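/*
 * Bring-up sequence for the IB device: query device attributes, claim the
 * doorbell BAR, apply HW hints, enable the AENQ groups, create the
 * completion EQs, and finally register with the RDMA core. Each error
 * label unwinds exactly the steps that succeeded before it.
 */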
static int efa_ib_device_add(struct efa_dev *dev)
{
	struct efa_com_get_hw_hints_result hw_hints;
	struct pci_dev *pdev = dev->pdev;
	int err;

	efa_stats_init(dev);

	err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
	if (err)
		return err;

	dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
	err = efa_request_doorbell_bar(dev);
	if (err)
		return err;

	err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
	if (err)
		goto err_release_doorbell_bar;

	efa_update_hw_hints(dev, &hw_hints);

	/* Try to enable all the available aenq groups */
	err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
	if (err)
		goto err_release_doorbell_bar;

	err = efa_create_eqs(dev);
	if (err)
		goto err_release_doorbell_bar;

	efa_set_host_info(dev);

	dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
	dev->ibdev.dev.parent = &pdev->dev;

	ib_set_device_ops(&dev->ibdev, &efa_dev_ops);

	err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
	if (err)
		goto err_destroy_eqs;

	ibdev_info(&dev->ibdev, "IB device registered\n");

	return 0;

err_destroy_eqs:
	efa_destroy_eqs(dev);
err_release_doorbell_bar:
	efa_release_doorbell_bar(dev);
	return err;
}

static void efa_ib_device_remove(struct efa_dev *dev)
{
	ibdev_info(&dev->ibdev, "Unregister ib device\n");
	ib_unregister_device(&dev->ibdev);
	efa_destroy_eqs(dev);
	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
	efa_release_doorbell_bar(dev);
}
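/*
 * MSI-X layout: vector EFA_MGMNT_MSIX_VEC_IDX serves the admin queue and
 * AENQ; the remaining vectors (from EFA_COMP_EQS_VEC_BASE up) serve the
 * completion EQs, so up to num_online_cpus() + 1 vectors are requested.
 */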
static void efa_disable_msix(struct efa_dev *dev)
{
	pci_free_irq_vectors(dev->pdev);
}

static int efa_enable_msix(struct efa_dev *dev)
{
	int msix_vecs, irq_num;

	/*
	 * Reserve the max msix vectors we might need, one vector is reserved
	 * for admin.
	 */
	msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
			  num_online_cpus() + 1);
	dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
		msix_vecs);

	dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
	irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_num < 0) {
		dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
			irq_num);
		return -ENOSPC;
	}

	if (irq_num != msix_vecs) {
		efa_disable_msix(dev);
		dev_err(&dev->pdev->dev,
			"Allocated %d MSI-X (out of %d requested)\n",
			irq_num, msix_vecs);
		return -ENOSPC;
	}

	return 0;
}
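/*
 * Low-level device init: reset the device, validate the interface
 * version, then size the DMA mask from the device-reported width.
 */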
static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
{
	int dma_width;
	int err;

	err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
	if (err)
		return err;

	err = efa_com_validate_version(edev);
	if (err)
		return err;

	dma_width = efa_com_get_dma_width(edev);
	if (dma_width < 0) {
		err = dma_width;
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width));
	if (err) {
		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", err);
		return err;
	}

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	return 0;
}
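/*
 * First probe stage: enable the PCI function, map the register BAR,
 * initialize readless MMIO and the device, enable MSI-X, wire up the
 * management IRQ, and bring up the admin queue. The error labels unwind
 * in strict reverse order of initialization.
 */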
static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
{
	struct efa_com_dev *edev;
	struct efa_dev *dev;
	int bars;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return ERR_PTR(err);
	}

	pci_set_master(pdev);

	dev = ib_alloc_device(efa_dev, ibdev);
	if (!dev) {
		dev_err(&pdev->dev, "Device alloc failed\n");
		err = -ENOMEM;
		goto err_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	edev = &dev->edev;
	edev->efa_dev = dev;
	edev->dmadev = &pdev->dev;
	dev->pdev = pdev;
	xa_init(&dev->cqs_xa);

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
	err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			err);
		goto err_ibdev_destroy;
	}

	dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
	dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
	dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
	dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);

	edev->reg_bar = devm_ioremap(&pdev->dev,
				     dev->reg_bar_addr,
				     dev->reg_bar_len);
	if (!edev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap register bar\n");
		err = -EFAULT;
		goto err_release_bars;
	}

	err = efa_com_mmio_reg_read_init(edev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init readless MMIO\n");
		goto err_iounmap;
	}

	err = efa_device_init(edev, pdev);
	if (err) {
		dev_err(&pdev->dev, "EFA device init failed\n");
		if (err == -ETIME)
			err = -EPROBE_DEFER;
		goto err_reg_read_destroy;
	}

	err = efa_enable_msix(dev);
	if (err)
		goto err_reg_read_destroy;

	edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
	edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;

	err = efa_set_mgmnt_irq(dev);
	if (err)
		goto err_disable_msix;

	err = efa_com_admin_init(edev, &aenq_handlers);
	if (err)
		goto err_free_mgmnt_irq;

	return dev;

err_free_mgmnt_irq:
	efa_free_irq(dev, &dev->admin_irq);
err_disable_msix:
	efa_disable_msix(dev);
err_reg_read_destroy:
	efa_com_mmio_reg_read_destroy(edev);
err_iounmap:
	devm_iounmap(&pdev->dev, edev->reg_bar);
err_release_bars:
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
err_ibdev_destroy:
	ib_dealloc_device(&dev->ibdev);
err_disable_device:
	pci_disable_device(pdev);
	return ERR_PTR(err);
}

static void efa_remove_device(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);
	struct efa_com_dev *edev;

	edev = &dev->edev;
	efa_com_admin_destroy(edev);
	efa_free_irq(dev, &dev->admin_irq);
	efa_disable_msix(dev);
	efa_com_mmio_reg_read_destroy(edev);
	devm_iounmap(&pdev->dev, edev->reg_bar);
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
	xa_destroy(&dev->cqs_xa);
	ib_dealloc_device(&dev->ibdev);
	pci_disable_device(pdev);
}
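/*
 * Probe is split in two: efa_probe_device() brings up PCI, MMIO and the
 * admin queue, and efa_ib_device_add() layers the RDMA device on top.
 * Remove tears the two stages down in reverse.
 */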
static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct efa_dev *dev;
	int err;

	dev = efa_probe_device(pdev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	err = efa_ib_device_add(dev);
	if (err)
		goto err_remove_device;

	return 0;

err_remove_device:
	efa_remove_device(pdev);
	return err;
}

static void efa_remove(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);

	efa_ib_device_remove(dev);
	efa_remove_device(pdev);
}

static struct pci_driver efa_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = efa_pci_tbl,
	.probe = efa_probe,
	.remove = efa_remove,
};

module_pci_driver(efa_pci_driver);