  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #include <linux/aer.h>
  3. #include <linux/delay.h>
  4. #include <linux/firmware.h>
  5. #include <linux/list.h>
  6. #include <linux/module.h>
  7. #include <linux/mutex.h>
  8. #include <linux/pci.h>
  9. #include <linux/pci_ids.h>
  10. #include "nitrox_dev.h"
  11. #include "nitrox_common.h"
  12. #include "nitrox_csr.h"
  13. #include "nitrox_hal.h"
  14. #include "nitrox_isr.h"
  15. #include "nitrox_debugfs.h"
  16. #define CNN55XX_DEV_ID 0x12
  17. #define UCODE_HLEN 48
  18. #define DEFAULT_SE_GROUP 0
  19. #define DEFAULT_AE_GROUP 0
  20. #define DRIVER_VERSION "1.2"
  21. #define CNN55XX_UCD_BLOCK_SIZE 32768
  22. #define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
  23. #define FW_DIR "cavium/"
  24. /* SE microcode */
  25. #define SE_FW FW_DIR "cnn55xx_se.fw"
  26. /* AE microcode */
  27. #define AE_FW FW_DIR "cnn55xx_ae.fw"
static const char nitrox_driver_name[] = "CNN55XX";

/* Global list of probed NITROX devices; guarded by devlist_lock. */
static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
/* Count of devices on ndevlist; also used to assign ndev->idx at probe. */
static unsigned int num_devices;

/*
 * nitrox_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);

/* Per-queue command queue length, tunable at module load time. */
static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
/**
 * struct ucode - Firmware Header
 * @id: microcode ID
 * @version: firmware version string (VERSION_LEN - 1 bytes; the driver
 *	copies only VERSION_LEN - 2 of them and NUL-terminates itself, so
 *	the image need not carry a terminator)
 * @code_size: code section size, big-endian; the driver derives the byte
 *	length as be32_to_cpu(code_size) * 2
 * @raz: alignment padding
 * @code: code section (flexible array of 64-bit words written to the UCD)
 */
struct ucode {
	u8 id;
	char version[VERSION_LEN - 1];
	__be32 code_size;
	u8 raz[12];
	u64 code[];
};
  59. /*
  60. * write_to_ucd_unit - Write Firmware to NITROX UCD unit
  61. */
  62. static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
  63. u64 *ucode_data, int block_num)
  64. {
  65. u32 code_size;
  66. u64 offset, data;
  67. int i = 0;
  68. /*
  69. * UCD structure
  70. *
  71. * -------------
  72. * | BLK 7 |
  73. * -------------
  74. * | BLK 6 |
  75. * -------------
  76. * | ... |
  77. * -------------
  78. * | BLK 0 |
  79. * -------------
  80. * Total of 8 blocks, each size 32KB
  81. */
  82. /* set the block number */
  83. offset = UCD_UCODE_LOAD_BLOCK_NUM;
  84. nitrox_write_csr(ndev, offset, block_num);
  85. code_size = roundup(ucode_size, 16);
  86. while (code_size) {
  87. data = ucode_data[i];
  88. /* write 8 bytes at a time */
  89. offset = UCD_UCODE_LOAD_IDX_DATAX(i);
  90. nitrox_write_csr(ndev, offset, data);
  91. code_size -= 8;
  92. i++;
  93. }
  94. usleep_range(300, 400);
  95. }
  96. static int nitrox_load_fw(struct nitrox_device *ndev)
  97. {
  98. const struct firmware *fw;
  99. const char *fw_name;
  100. struct ucode *ucode;
  101. u64 *ucode_data;
  102. u64 offset;
  103. union ucd_core_eid_ucode_block_num core_2_eid_val;
  104. union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
  105. union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
  106. u32 ucode_size;
  107. int ret, i = 0;
  108. fw_name = SE_FW;
  109. dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);
  110. ret = request_firmware(&fw, fw_name, DEV(ndev));
  111. if (ret < 0) {
  112. dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
  113. return ret;
  114. }
  115. ucode = (struct ucode *)fw->data;
  116. ucode_size = be32_to_cpu(ucode->code_size) * 2;
  117. if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
  118. dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
  119. ucode_size, fw_name);
  120. release_firmware(fw);
  121. return -EINVAL;
  122. }
  123. ucode_data = ucode->code;
  124. /* copy the firmware version */
  125. memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
  126. ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';
  127. /* Load SE Firmware on UCD Block 0 */
  128. write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);
  129. release_firmware(fw);
  130. /* put all SE cores in DEFAULT_SE_GROUP */
  131. offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
  132. nitrox_write_csr(ndev, offset, (~0ULL));
  133. /* write block number and firmware length
  134. * bit:<2:0> block number
  135. * bit:3 is set SE uses 32KB microcode
  136. * bit:3 is clear SE uses 64KB microcode
  137. */
  138. core_2_eid_val.value = 0ULL;
  139. core_2_eid_val.ucode_blk = 0;
  140. if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
  141. core_2_eid_val.ucode_len = 1;
  142. else
  143. core_2_eid_val.ucode_len = 0;
  144. for (i = 0; i < ndev->hw.se_cores; i++) {
  145. offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
  146. nitrox_write_csr(ndev, offset, core_2_eid_val.value);
  147. }
  148. fw_name = AE_FW;
  149. dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);
  150. ret = request_firmware(&fw, fw_name, DEV(ndev));
  151. if (ret < 0) {
  152. dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
  153. return ret;
  154. }
  155. ucode = (struct ucode *)fw->data;
  156. ucode_size = be32_to_cpu(ucode->code_size) * 2;
  157. if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
  158. dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
  159. ucode_size, fw_name);
  160. release_firmware(fw);
  161. return -EINVAL;
  162. }
  163. ucode_data = ucode->code;
  164. /* copy the firmware version */
  165. memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
  166. ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';
  167. /* Load AE Firmware on UCD Block 2 */
  168. write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);
  169. release_firmware(fw);
  170. /* put all AE cores in DEFAULT_AE_GROUP */
  171. offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
  172. aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
  173. nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
  174. offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
  175. aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
  176. nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);
  177. /* write block number and firmware length
  178. * bit:<2:0> block number
  179. * bit:3 is set AE uses 32KB microcode
  180. * bit:3 is clear AE uses 64KB microcode
  181. */
  182. core_2_eid_val.value = 0ULL;
  183. core_2_eid_val.ucode_blk = 2;
  184. if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
  185. core_2_eid_val.ucode_len = 1;
  186. else
  187. core_2_eid_val.ucode_len = 0;
  188. for (i = 0; i < ndev->hw.ae_cores; i++) {
  189. offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
  190. nitrox_write_csr(ndev, offset, core_2_eid_val.value);
  191. }
  192. return 0;
  193. }
  194. /**
  195. * nitrox_add_to_devlist - add NITROX device to global device list
  196. * @ndev: NITROX device
  197. */
  198. static int nitrox_add_to_devlist(struct nitrox_device *ndev)
  199. {
  200. struct nitrox_device *dev;
  201. int ret = 0;
  202. INIT_LIST_HEAD(&ndev->list);
  203. refcount_set(&ndev->refcnt, 1);
  204. mutex_lock(&devlist_lock);
  205. list_for_each_entry(dev, &ndevlist, list) {
  206. if (dev == ndev) {
  207. ret = -EEXIST;
  208. goto unlock;
  209. }
  210. }
  211. ndev->idx = num_devices++;
  212. list_add_tail(&ndev->list, &ndevlist);
  213. unlock:
  214. mutex_unlock(&devlist_lock);
  215. return ret;
  216. }
/**
 * nitrox_remove_from_devlist - remove NITROX device from
 * global device list
 * @ndev: NITROX device
 */
static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
{
	/* both the list and num_devices are guarded by devlist_lock */
	mutex_lock(&devlist_lock);
	list_del(&ndev->list);
	num_devices--;
	mutex_unlock(&devlist_lock);
}
  229. struct nitrox_device *nitrox_get_first_device(void)
  230. {
  231. struct nitrox_device *ndev = NULL, *iter;
  232. mutex_lock(&devlist_lock);
  233. list_for_each_entry(iter, &ndevlist, list) {
  234. if (nitrox_ready(iter)) {
  235. ndev = iter;
  236. break;
  237. }
  238. }
  239. mutex_unlock(&devlist_lock);
  240. if (!ndev)
  241. return NULL;
  242. refcount_inc(&ndev->refcnt);
  243. /* barrier to sync with other cpus */
  244. smp_mb__after_atomic();
  245. return ndev;
  246. }
/* Drop a reference taken by nitrox_get_first_device(); NULL is a no-op. */
void nitrox_put_device(struct nitrox_device *ndev)
{
	if (!ndev)
		return;

	refcount_dec(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
}
  255. static int nitrox_device_flr(struct pci_dev *pdev)
  256. {
  257. int pos = 0;
  258. pos = pci_save_state(pdev);
  259. if (pos) {
  260. dev_err(&pdev->dev, "Failed to save pci state\n");
  261. return -ENOMEM;
  262. }
  263. pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
  264. pci_restore_state(pdev);
  265. return 0;
  266. }
  267. static int nitrox_pf_sw_init(struct nitrox_device *ndev)
  268. {
  269. int err;
  270. err = nitrox_common_sw_init(ndev);
  271. if (err)
  272. return err;
  273. err = nitrox_register_interrupts(ndev);
  274. if (err)
  275. nitrox_common_sw_cleanup(ndev);
  276. return err;
  277. }
/* Reverse of nitrox_pf_sw_init(): interrupts first, then common state. */
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}
  283. /**
  284. * nitrox_bist_check - Check NITROX BIST registers status
  285. * @ndev: NITROX device
  286. */
  287. static int nitrox_bist_check(struct nitrox_device *ndev)
  288. {
  289. u64 value = 0;
  290. int i;
  291. for (i = 0; i < NR_CLUSTERS; i++) {
  292. value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
  293. value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
  294. }
  295. value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
  296. value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
  297. value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
  298. value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
  299. value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
  300. value += nitrox_read_csr(ndev, POM_BIST_REG);
  301. value += nitrox_read_csr(ndev, BMI_BIST_REG);
  302. value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
  303. value += nitrox_read_csr(ndev, BMO_BIST_REG);
  304. value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
  305. value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
  306. if (value)
  307. return -EIO;
  308. return 0;
  309. }
/*
 * nitrox_pf_hw_init - bring up the PF hardware: BIST check, per-unit
 * configuration, firmware load, then EMU configuration.
 * NOTE(review): the unit-config ordering below appears deliberate
 * (EMU last, after firmware is loaded) — preserve it.
 */
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	/* hardware must pass its built-in self test before configuration */
	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}

	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_core_unit(ndev);
	nitrox_config_aqm_unit(ndev);
	nitrox_config_nps_pkt_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on cores */
	err = nitrox_load_fw(ndev);
	if (err)
		return err;

	nitrox_config_emu_unit(ndev);

	return 0;
}
  338. /**
  339. * nitrox_probe - NITROX Initialization function.
  340. * @pdev: PCI device information struct
  341. * @id: entry in nitrox_pci_tbl
  342. *
  343. * Return: 0, if the driver is bound to the device, or
  344. * a negative error if there is failure.
  345. */
  346. static int nitrox_probe(struct pci_dev *pdev,
  347. const struct pci_device_id *id)
  348. {
  349. struct nitrox_device *ndev;
  350. int err;
  351. dev_info_once(&pdev->dev, "%s driver version %s\n",
  352. nitrox_driver_name, DRIVER_VERSION);
  353. err = pci_enable_device_mem(pdev);
  354. if (err)
  355. return err;
  356. /* do FLR */
  357. err = nitrox_device_flr(pdev);
  358. if (err) {
  359. dev_err(&pdev->dev, "FLR failed\n");
  360. goto flr_fail;
  361. }
  362. if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
  363. dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
  364. } else {
  365. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  366. if (err) {
  367. dev_err(&pdev->dev, "DMA configuration failed\n");
  368. goto flr_fail;
  369. }
  370. }
  371. err = pci_request_mem_regions(pdev, nitrox_driver_name);
  372. if (err)
  373. goto flr_fail;
  374. pci_set_master(pdev);
  375. ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
  376. if (!ndev) {
  377. err = -ENOMEM;
  378. goto ndev_fail;
  379. }
  380. pci_set_drvdata(pdev, ndev);
  381. ndev->pdev = pdev;
  382. /* add to device list */
  383. nitrox_add_to_devlist(ndev);
  384. ndev->hw.vendor_id = pdev->vendor;
  385. ndev->hw.device_id = pdev->device;
  386. ndev->hw.revision_id = pdev->revision;
  387. /* command timeout in jiffies */
  388. ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
  389. ndev->node = dev_to_node(&pdev->dev);
  390. if (ndev->node == NUMA_NO_NODE)
  391. ndev->node = 0;
  392. ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
  393. pci_resource_len(pdev, 0));
  394. if (!ndev->bar_addr) {
  395. err = -EIO;
  396. goto ioremap_err;
  397. }
  398. /* allocate command queus based on cpus, max queues are 64 */
  399. ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
  400. ndev->qlen = qlen;
  401. err = nitrox_pf_sw_init(ndev);
  402. if (err)
  403. goto pf_sw_fail;
  404. err = nitrox_pf_hw_init(ndev);
  405. if (err)
  406. goto pf_hw_fail;
  407. nitrox_debugfs_init(ndev);
  408. /* clear the statistics */
  409. atomic64_set(&ndev->stats.posted, 0);
  410. atomic64_set(&ndev->stats.completed, 0);
  411. atomic64_set(&ndev->stats.dropped, 0);
  412. atomic_set(&ndev->state, __NDEV_READY);
  413. /* barrier to sync with other cpus */
  414. smp_mb__after_atomic();
  415. err = nitrox_crypto_register();
  416. if (err)
  417. goto crypto_fail;
  418. return 0;
  419. crypto_fail:
  420. nitrox_debugfs_exit(ndev);
  421. atomic_set(&ndev->state, __NDEV_NOT_READY);
  422. /* barrier to sync with other cpus */
  423. smp_mb__after_atomic();
  424. pf_hw_fail:
  425. nitrox_pf_sw_cleanup(ndev);
  426. pf_sw_fail:
  427. iounmap(ndev->bar_addr);
  428. ioremap_err:
  429. nitrox_remove_from_devlist(ndev);
  430. kfree(ndev);
  431. pci_set_drvdata(pdev, NULL);
  432. ndev_fail:
  433. pci_release_mem_regions(pdev);
  434. flr_fail:
  435. pci_disable_device(pdev);
  436. return err;
  437. }
/**
 * nitrox_remove - Unbind the driver from the device.
 * @pdev: PCI device information struct
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	/* drop the probe reference; refuse teardown while users hold refs */
	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	/* mark not-ready so nitrox_get_first_device() stops handing it out */
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/* Minimal quiesce for system shutdown: release PCI resources only. */
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/* PCI driver glue; sriov_configure enables/disables VFs via sysfs. */
static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove = nitrox_remove,
	.shutdown = nitrox_shutdown,
	.sriov_configure = nitrox_sriov_configure,
};
  483. module_pci_driver(nitrox_driver);
  484. MODULE_AUTHOR("Srikanth Jampala <[email protected]>");
  485. MODULE_DESCRIPTION("Cavium CNN55XX PF Driver" DRIVER_VERSION " ");
  486. MODULE_LICENSE("GPL");
  487. MODULE_VERSION(DRIVER_VERSION);
  488. MODULE_FIRMWARE(SE_FW);