efct_driver.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */
  6. #include "efct_driver.h"
  7. #include "efct_hw.h"
  8. #include "efct_unsol.h"
  9. #include "efct_scsi.h"
/* Global list of all efct adapter instances managed by this driver. */
LIST_HEAD(efct_devices);

/* Module parameter: per-device logging bitmask (copied into efct->logmask). */
static int logmask;
module_param(logmask, int, 0444);
MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");
/*
 * Callback table handed to libefc: maps the discovery library's requests
 * (mailbox commands, ELS/BLS transmit, nport and remote-node lifecycle,
 * HW sequence free) onto this driver's HW and SCSI-target implementations.
 */
static struct libefc_function_template efct_libefc_templ = {
	.issue_mbox_rqst = efct_issue_mbox_rqst,
	.send_els = efct_els_hw_srrs_send,
	.send_bls = efct_efc_bls_send,

	.new_nport = efct_scsi_tgt_new_nport,
	.del_nport = efct_scsi_tgt_del_nport,

	.scsi_new_node = efct_scsi_new_initiator,
	.scsi_del_node = efct_scsi_del_initiator,

	.hw_seq_free = efct_efc_hw_sequence_free,
};
  24. static int
  25. efct_device_init(void)
  26. {
  27. int rc;
  28. /* driver-wide init for target-server */
  29. rc = efct_scsi_tgt_driver_init();
  30. if (rc) {
  31. pr_err("efct_scsi_tgt_init failed rc=%d\n", rc);
  32. return rc;
  33. }
  34. rc = efct_scsi_reg_fc_transport();
  35. if (rc) {
  36. efct_scsi_tgt_driver_exit();
  37. pr_err("failed to register to FC host\n");
  38. return rc;
  39. }
  40. return 0;
  41. }
/*
 * Driver-wide teardown: mirrors efct_device_init() in reverse order —
 * unregister from the FC transport first, then shut down the target server.
 */
static void
efct_device_shutdown(void)
{
	efct_scsi_release_fc_transport();

	efct_scsi_tgt_driver_exit();
}
  48. static void *
  49. efct_device_alloc(u32 nid)
  50. {
  51. struct efct *efct = NULL;
  52. efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
  53. if (!efct)
  54. return efct;
  55. INIT_LIST_HEAD(&efct->list_entry);
  56. list_add_tail(&efct->list_entry, &efct_devices);
  57. return efct;
  58. }
  59. static void
  60. efct_teardown_msix(struct efct *efct)
  61. {
  62. u32 i;
  63. for (i = 0; i < efct->n_msix_vec; i++) {
  64. free_irq(pci_irq_vector(efct->pci, i),
  65. &efct->intr_context[i]);
  66. }
  67. pci_free_irq_vectors(efct->pci);
  68. }
  69. static int
  70. efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
  71. {
  72. struct efc *efc;
  73. struct sli4 *sli;
  74. int rc = 0;
  75. efc = kzalloc(sizeof(*efc), GFP_KERNEL);
  76. if (!efc)
  77. return -ENOMEM;
  78. efct->efcport = efc;
  79. memcpy(&efc->tt, tt, sizeof(*tt));
  80. efc->base = efct;
  81. efc->pci = efct->pci;
  82. efc->def_wwnn = efct_get_wwnn(&efct->hw);
  83. efc->def_wwpn = efct_get_wwpn(&efct->hw);
  84. efc->enable_tgt = 1;
  85. efc->log_level = EFC_LOG_LIB;
  86. sli = &efct->hw.sli;
  87. efc->max_xfer_size = sli->sge_supported_length *
  88. sli_get_max_sgl(&efct->hw.sli);
  89. efc->sli = sli;
  90. efc->fcfi = efct->hw.fcf_indicator;
  91. rc = efcport_init(efc);
  92. if (rc)
  93. efc_log_err(efc, "efcport_init failed\n");
  94. return rc;
  95. }
  96. static int efct_request_firmware_update(struct efct *efct);
  97. static const char*
  98. efct_pci_model(u16 device)
  99. {
  100. switch (device) {
  101. case EFCT_DEVICE_LANCER_G6: return "LPE31004";
  102. case EFCT_DEVICE_LANCER_G7: return "LPE36000";
  103. default: return "unknown";
  104. }
  105. }
  106. static int
  107. efct_device_attach(struct efct *efct)
  108. {
  109. u32 rc = 0, i = 0;
  110. if (efct->attached) {
  111. efc_log_err(efct, "Device is already attached\n");
  112. return -EIO;
  113. }
  114. snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
  115. efct->instance_index);
  116. efct->logmask = logmask;
  117. efct->filter_def = EFCT_DEFAULT_FILTER;
  118. efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;
  119. efct->model = efct_pci_model(efct->pci->device);
  120. efct->efct_req_fw_upgrade = true;
  121. /* Allocate transport object and bring online */
  122. efct->xport = efct_xport_alloc(efct);
  123. if (!efct->xport) {
  124. efc_log_err(efct, "failed to allocate transport object\n");
  125. rc = -ENOMEM;
  126. goto out;
  127. }
  128. rc = efct_xport_attach(efct->xport);
  129. if (rc) {
  130. efc_log_err(efct, "failed to attach transport object\n");
  131. goto xport_out;
  132. }
  133. rc = efct_xport_initialize(efct->xport);
  134. if (rc) {
  135. efc_log_err(efct, "failed to initialize transport object\n");
  136. goto xport_out;
  137. }
  138. rc = efct_efclib_config(efct, &efct_libefc_templ);
  139. if (rc) {
  140. efc_log_err(efct, "failed to init efclib\n");
  141. goto efclib_out;
  142. }
  143. for (i = 0; i < efct->n_msix_vec; i++) {
  144. efc_log_debug(efct, "irq %d enabled\n", i);
  145. enable_irq(pci_irq_vector(efct->pci, i));
  146. }
  147. efct->attached = true;
  148. if (efct->efct_req_fw_upgrade)
  149. efct_request_firmware_update(efct);
  150. return rc;
  151. efclib_out:
  152. efct_xport_detach(efct->xport);
  153. xport_out:
  154. efct_xport_free(efct->xport);
  155. efct->xport = NULL;
  156. out:
  157. return rc;
  158. }
  159. static int
  160. efct_device_detach(struct efct *efct)
  161. {
  162. int i;
  163. if (!efct || !efct->attached) {
  164. pr_err("Device is not attached\n");
  165. return -EIO;
  166. }
  167. if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
  168. efc_log_err(efct, "Transport Shutdown timed out\n");
  169. for (i = 0; i < efct->n_msix_vec; i++)
  170. disable_irq(pci_irq_vector(efct->pci, i));
  171. efct_xport_detach(efct->xport);
  172. efct_xport_free(efct->xport);
  173. efct->xport = NULL;
  174. efcport_destroy(efct->efcport);
  175. kfree(efct->efcport);
  176. efct->attached = false;
  177. return 0;
  178. }
  179. static void
  180. efct_fw_write_cb(int status, u32 actual_write_length,
  181. u32 change_status, void *arg)
  182. {
  183. struct efct_fw_write_result *result = arg;
  184. result->status = status;
  185. result->actual_xfer = actual_write_length;
  186. result->change_status = change_status;
  187. complete(&result->done);
  188. }
  189. static int
  190. efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
  191. u8 *change_status)
  192. {
  193. int rc = 0;
  194. u32 bytes_left;
  195. u32 xfer_size;
  196. u32 offset;
  197. struct efc_dma dma;
  198. int last = 0;
  199. struct efct_fw_write_result result;
  200. init_completion(&result.done);
  201. bytes_left = buf_len;
  202. offset = 0;
  203. dma.size = FW_WRITE_BUFSIZE;
  204. dma.virt = dma_alloc_coherent(&efct->pci->dev,
  205. dma.size, &dma.phys, GFP_KERNEL);
  206. if (!dma.virt)
  207. return -ENOMEM;
  208. while (bytes_left > 0) {
  209. if (bytes_left > FW_WRITE_BUFSIZE)
  210. xfer_size = FW_WRITE_BUFSIZE;
  211. else
  212. xfer_size = bytes_left;
  213. memcpy(dma.virt, buf + offset, xfer_size);
  214. if (bytes_left == xfer_size)
  215. last = 1;
  216. efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
  217. last, efct_fw_write_cb, &result);
  218. if (wait_for_completion_interruptible(&result.done) != 0) {
  219. rc = -ENXIO;
  220. break;
  221. }
  222. if (result.actual_xfer == 0 || result.status != 0) {
  223. rc = -EFAULT;
  224. break;
  225. }
  226. if (last)
  227. *change_status = result.change_status;
  228. bytes_left -= result.actual_xfer;
  229. offset += result.actual_xfer;
  230. }
  231. dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
  232. return rc;
  233. }
  234. static int
  235. efct_fw_reset(struct efct *efct)
  236. {
  237. /*
  238. * Firmware reset to activate the new firmware.
  239. * Function 0 will update and load the new firmware
  240. * during attach.
  241. */
  242. if (timer_pending(&efct->xport->stats_timer))
  243. del_timer(&efct->xport->stats_timer);
  244. if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
  245. efc_log_info(efct, "failed to reset firmware\n");
  246. return -EIO;
  247. }
  248. efc_log_info(efct, "successfully reset firmware.Now resetting port\n");
  249. efct_device_detach(efct);
  250. return efct_device_attach(efct);
  251. }
  252. static int
  253. efct_request_firmware_update(struct efct *efct)
  254. {
  255. int rc = 0;
  256. u8 file_name[256], fw_change_status = 0;
  257. const struct firmware *fw;
  258. struct efct_hw_grp_hdr *fw_image;
  259. snprintf(file_name, 256, "%s.grp", efct->model);
  260. rc = request_firmware(&fw, file_name, &efct->pci->dev);
  261. if (rc) {
  262. efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
  263. return rc;
  264. }
  265. fw_image = (struct efct_hw_grp_hdr *)fw->data;
  266. if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
  267. strnlen(fw_image->revision, 16))) {
  268. efc_log_debug(efct,
  269. "Skip update. Firmware is already up to date.\n");
  270. goto exit;
  271. }
  272. efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
  273. efct->hw.sli.fw_name[0], fw_image->revision);
  274. rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
  275. if (rc) {
  276. efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
  277. goto exit;
  278. }
  279. efc_log_info(efct, "Firmware updated successfully\n");
  280. switch (fw_change_status) {
  281. case 0x00:
  282. efc_log_info(efct, "New firmware is active.\n");
  283. break;
  284. case 0x01:
  285. efc_log_info(efct,
  286. "System reboot needed to activate the new firmware\n");
  287. break;
  288. case 0x02:
  289. case 0x03:
  290. efc_log_info(efct,
  291. "firmware reset to activate the new firmware\n");
  292. efct_fw_reset(efct);
  293. break;
  294. default:
  295. efc_log_info(efct, "Unexpected value change_status:%d\n",
  296. fw_change_status);
  297. break;
  298. }
  299. exit:
  300. release_firmware(fw);
  301. return rc;
  302. }
  303. static void
  304. efct_device_free(struct efct *efct)
  305. {
  306. if (efct) {
  307. list_del(&efct->list_entry);
  308. kfree(efct);
  309. }
  310. }
  311. static int
  312. efct_device_interrupts_required(struct efct *efct)
  313. {
  314. int rc;
  315. rc = efct_hw_setup(&efct->hw, efct, efct->pci);
  316. if (rc < 0)
  317. return rc;
  318. return efct->hw.config.n_eq;
  319. }
/*
 * Threaded-IRQ bottom half: process completions on the event queue
 * matching this vector's index, bounded by the device's per-ISR time
 * budget (max_isr_time_msec).
 */
static irqreturn_t
efct_intr_thread(int irq, void *handle)
{
	struct efct_intr_context *intr_ctx = handle;
	struct efct *efct = intr_ctx->efct;

	efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
	return IRQ_HANDLED;
}
/*
 * Hard-IRQ top half: defer all processing to efct_intr_thread() via
 * the threaded-IRQ mechanism (see request_threaded_irq() in
 * efct_setup_msix()).
 */
static irqreturn_t
efct_intr_msix(int irq, void *handle)
{
	return IRQ_WAKE_THREAD;
}
  333. static int
  334. efct_setup_msix(struct efct *efct, u32 num_intrs)
  335. {
  336. int rc = 0, i;
  337. if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
  338. dev_err(&efct->pci->dev,
  339. "%s : MSI-X not available\n", __func__);
  340. return -EIO;
  341. }
  342. efct->n_msix_vec = num_intrs;
  343. rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
  344. PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
  345. if (rc < 0) {
  346. dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
  347. return rc;
  348. }
  349. for (i = 0; i < num_intrs; i++) {
  350. struct efct_intr_context *intr_ctx = NULL;
  351. intr_ctx = &efct->intr_context[i];
  352. intr_ctx->efct = efct;
  353. intr_ctx->index = i;
  354. rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
  355. efct_intr_msix, efct_intr_thread, 0,
  356. EFCT_DRIVER_NAME, intr_ctx);
  357. if (rc) {
  358. dev_err(&efct->pci->dev,
  359. "Failed to register %d vector: %d\n", i, rc);
  360. goto out;
  361. }
  362. }
  363. return rc;
  364. out:
  365. while (--i >= 0)
  366. free_irq(pci_irq_vector(efct->pci, i),
  367. &efct->intr_context[i]);
  368. pci_free_irq_vectors(efct->pci);
  369. return rc;
  370. }
  371. static struct pci_device_id efct_pci_table[] = {
  372. {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
  373. {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
  374. {} /* terminate list */
  375. };
  376. static int
  377. efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  378. {
  379. struct efct *efct = NULL;
  380. int rc;
  381. u32 i, r;
  382. int num_interrupts = 0;
  383. int nid;
  384. dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);
  385. rc = pci_enable_device_mem(pdev);
  386. if (rc)
  387. return rc;
  388. pci_set_master(pdev);
  389. rc = pci_set_mwi(pdev);
  390. if (rc) {
  391. dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
  392. goto mwi_out;
  393. }
  394. rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
  395. if (rc) {
  396. dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
  397. goto req_regions_out;
  398. }
  399. /* Fetch the Numa node id for this device */
  400. nid = dev_to_node(&pdev->dev);
  401. if (nid < 0) {
  402. dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid);
  403. nid = 0;
  404. }
  405. /* Allocate efct */
  406. efct = efct_device_alloc(nid);
  407. if (!efct) {
  408. dev_err(&pdev->dev, "Failed to allocate efct\n");
  409. rc = -ENOMEM;
  410. goto alloc_out;
  411. }
  412. efct->pci = pdev;
  413. efct->numa_node = nid;
  414. /* Map all memory BARs */
  415. for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
  416. if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
  417. efct->reg[r] = ioremap(pci_resource_start(pdev, i),
  418. pci_resource_len(pdev, i));
  419. r++;
  420. }
  421. /*
  422. * If the 64-bit attribute is set, both this BAR and the
  423. * next form the complete address. Skip processing the
  424. * next BAR.
  425. */
  426. if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
  427. i++;
  428. }
  429. pci_set_drvdata(pdev, efct);
  430. rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  431. if (rc) {
  432. dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
  433. goto dma_mask_out;
  434. }
  435. num_interrupts = efct_device_interrupts_required(efct);
  436. if (num_interrupts < 0) {
  437. efc_log_err(efct, "efct_device_interrupts_required failed\n");
  438. rc = -1;
  439. goto dma_mask_out;
  440. }
  441. /*
  442. * Initialize MSIX interrupts, note,
  443. * efct_setup_msix() enables the interrupt
  444. */
  445. rc = efct_setup_msix(efct, num_interrupts);
  446. if (rc) {
  447. dev_err(&pdev->dev, "Can't setup msix\n");
  448. goto dma_mask_out;
  449. }
  450. /* Disable interrupt for now */
  451. for (i = 0; i < efct->n_msix_vec; i++) {
  452. efc_log_debug(efct, "irq %d disabled\n", i);
  453. disable_irq(pci_irq_vector(efct->pci, i));
  454. }
  455. rc = efct_device_attach(efct);
  456. if (rc)
  457. goto attach_out;
  458. return 0;
  459. attach_out:
  460. efct_teardown_msix(efct);
  461. dma_mask_out:
  462. pci_set_drvdata(pdev, NULL);
  463. for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
  464. if (efct->reg[i])
  465. iounmap(efct->reg[i]);
  466. }
  467. efct_device_free(efct);
  468. alloc_out:
  469. pci_release_regions(pdev);
  470. req_regions_out:
  471. pci_clear_mwi(pdev);
  472. mwi_out:
  473. pci_disable_device(pdev);
  474. return rc;
  475. }
/*
 * PCI remove: tear down in strict reverse order of probe — detach the
 * device, free MSI-X vectors, unmap all mapped BARs, drop the drvdata
 * pointer, free the efct object, then release PCI regions and disable
 * the device.
 */
static void
efct_pci_remove(struct pci_dev *pdev)
{
	struct efct *efct = pci_get_drvdata(pdev);
	u32 i;

	/* Nothing to do if probe never completed for this device. */
	if (!efct)
		return;

	efct_device_detach(efct);

	efct_teardown_msix(efct);

	for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
		if (efct->reg[i])
			iounmap(efct->reg[i]);
	}

	pci_set_drvdata(pdev, NULL);

	efct_device_free(efct);

	pci_release_regions(pdev);

	pci_disable_device(pdev);
}
  494. static void
  495. efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
  496. {
  497. if (efct) {
  498. efc_log_debug(efct,
  499. "PCI channel disable preparing for reset\n");
  500. efct_device_detach(efct);
  501. /* Disable interrupt and pci device */
  502. efct_teardown_msix(efct);
  503. }
  504. pci_disable_device(pdev);
  505. }
  506. static void
  507. efct_device_prep_for_recover(struct efct *efct)
  508. {
  509. if (efct) {
  510. efc_log_debug(efct, "PCI channel preparing for recovery\n");
  511. efct_hw_io_abort_all(&efct->hw);
  512. }
  513. }
  514. /**
  515. * efct_pci_io_error_detected - method for handling PCI I/O error
  516. * @pdev: pointer to PCI device.
  517. * @state: the current PCI connection state.
  518. *
  519. * This routine is registered to the PCI subsystem for error handling. This
  520. * function is called by the PCI subsystem after a PCI bus error affecting
  521. * this device has been detected. When this routine is invoked, it dispatches
  522. * device error detected handling routine, which will perform the proper
  523. * error detected operation.
  524. *
  525. * Return codes
  526. * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
  527. * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  528. */
  529. static pci_ers_result_t
  530. efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
  531. {
  532. struct efct *efct = pci_get_drvdata(pdev);
  533. pci_ers_result_t rc;
  534. switch (state) {
  535. case pci_channel_io_normal:
  536. efct_device_prep_for_recover(efct);
  537. rc = PCI_ERS_RESULT_CAN_RECOVER;
  538. break;
  539. case pci_channel_io_frozen:
  540. efct_device_prep_for_reset(efct, pdev);
  541. rc = PCI_ERS_RESULT_NEED_RESET;
  542. break;
  543. case pci_channel_io_perm_failure:
  544. efct_device_detach(efct);
  545. rc = PCI_ERS_RESULT_DISCONNECT;
  546. break;
  547. default:
  548. efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
  549. efct_device_prep_for_reset(efct, pdev);
  550. rc = PCI_ERS_RESULT_NEED_RESET;
  551. break;
  552. }
  553. return rc;
  554. }
/*
 * PCI error-recovery slot-reset hook: re-enable the device after a
 * slot reset, re-save state, restore bus mastering, rebuild the MSI-X
 * vectors, then recycle driver state via detach + attach.
 *
 * NOTE(review): the return values of efct_device_detach()/attach() are
 * ignored and PCI_ERS_RESULT_RECOVERED is reported even if re-attach
 * fails — confirm this is intentional.
 */
static pci_ers_result_t
efct_pci_io_slot_reset(struct pci_dev *pdev)
{
	int rc;
	struct efct *efct = pci_get_drvdata(pdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		efc_log_err(efct, "failed to enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	pci_set_master(pdev);

	rc = efct_setup_msix(efct, efct->n_msix_vec);
	if (rc)
		efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
			    rc);

	/* Perform device reset */
	efct_device_detach(efct);
	/* Bring device to online*/
	efct_device_attach(efct);

	return PCI_ERS_RESULT_RECOVERED;
}
/*
 * PCI error-recovery resume hook: traffic can flow again — recycle the
 * device by detaching and re-attaching so it comes back online clean.
 */
static void
efct_pci_io_resume(struct pci_dev *pdev)
{
	struct efct *efct = pci_get_drvdata(pdev);

	/* Perform device reset */
	efct_device_detach(efct);
	/* Bring device to online*/
	efct_device_attach(efct);
}
MODULE_DEVICE_TABLE(pci, efct_pci_table);

/* AER/EEH error-recovery callbacks registered with the PCI core. */
static struct pci_error_handlers efct_pci_err_handler = {
	.error_detected = efct_pci_io_error_detected,
	.slot_reset = efct_pci_io_slot_reset,
	.resume = efct_pci_io_resume,
};

/* PCI driver glue binding the supported device IDs to probe/remove. */
static struct pci_driver efct_pci_driver = {
	.name = EFCT_DRIVER_NAME,
	.id_table = efct_pci_table,
	.probe = efct_pci_probe,
	.remove = efct_pci_remove,
	.err_handler = &efct_pci_err_handler,
};
  603. static
  604. int __init efct_init(void)
  605. {
  606. int rc;
  607. rc = efct_device_init();
  608. if (rc) {
  609. pr_err("efct_device_init failed rc=%d\n", rc);
  610. return rc;
  611. }
  612. rc = pci_register_driver(&efct_pci_driver);
  613. if (rc) {
  614. pr_err("pci_register_driver failed rc=%d\n", rc);
  615. efct_device_shutdown();
  616. }
  617. return rc;
  618. }
/*
 * Module exit point: unregister the PCI driver (which removes all bound
 * devices), then undo the driver-wide initialization.
 */
static void __exit efct_exit(void)
{
	pci_unregister_driver(&efct_pci_driver);
	efct_device_shutdown();
}

module_init(efct_init);
module_exit(efct_exit);
MODULE_VERSION(EFCT_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");