zip_main.c

/***********************license start************************************
 * Copyright (c) 2003-2017 Cavium, Inc.
 * All rights reserved.
 *
 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
 *
 * This file is provided under the terms of the Cavium License (see below)
 * or under the terms of GNU General Public License, Version 2, as
 * published by the Free Software Foundation. When using or redistributing
 * this file, you may do so under either license.
 *
 * Cavium License: Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the following
 * conditions are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * * Neither the name of Cavium Inc. nor the names of its contributors may be
 *   used to endorse or promote products derived from this software without
 *   specific prior written permission.
 *
 * This Software, including technical data, may be subject to U.S. export
 * control laws, including the U.S. Export Administration Act and its
 * associated regulations, and may be subject to export or import
 * regulations in other countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
 * WITH YOU.
 ***********************license end**************************************/
#include "common.h"
#include "zip_crypto.h"

#define DRV_NAME "ThunderX-ZIP"

static struct zip_device *zip_dev[MAX_ZIP_DEVICES];

static const struct pci_device_id zip_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
        { 0, }
};

static void zip_debugfs_init(void);
static void zip_debugfs_exit(void);
static int zip_register_compression_device(void);
static void zip_unregister_compression_device(void);
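
/* 64-bit read/write helpers for the ZIP PF CSR space mapped in zip_probe() */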
void zip_reg_write(u64 val, u64 __iomem *addr)
{
        writeq(val, addr);
}

u64 zip_reg_read(u64 __iomem *addr)
{
        return readq(addr);
}
/*
 * Allocates a new ZIP device structure in the first free zip_dev[] slot.
 * Returns the zip_device pointer, or NULL if no slot is free or the
 * allocation fails.
 */
static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
{
        struct zip_device *zip = NULL;
        int idx;

        for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
                if (!zip_dev[idx])
                        break;
        }

        /* To ensure that the index is within the limit */
        if (idx < MAX_ZIP_DEVICES)
                zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);

        if (!zip)
                return NULL;

        zip_dev[idx] = zip;
        zip->index = idx;
        return zip;
}
/**
 * zip_get_device - Get ZIP device based on node id of cpu
 *
 * @node: Node id of the current cpu
 * Return: Pointer to Zip device structure
 */
struct zip_device *zip_get_device(int node)
{
        if ((node < MAX_ZIP_DEVICES) && (node >= 0))
                return zip_dev[node];

        zip_err("ZIP device not found for node id %d\n", node);
        return NULL;
}
/**
 * zip_get_node_id - Get the node id of the current cpu
 *
 * Return: Node id of the current cpu
 */
int zip_get_node_id(void)
{
        return cpu_to_node(raw_smp_processor_id());
}
/* Initializes the ZIP h/w sub-system */
static int zip_init_hw(struct zip_device *zip)
{
        union zip_cmd_ctl cmd_ctl;
        union zip_constants constants;
        union zip_que_ena que_ena;
        union zip_quex_map que_map;
        union zip_que_pri que_pri;
        union zip_quex_sbuf_addr que_sbuf_addr;
        union zip_quex_sbuf_ctl que_sbuf_ctl;
        int q = 0;

        /* Enable the ZIP Engine(Core) Clock */
        cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
        cmd_ctl.s.forceclk = 1;
        zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));

        zip_msg("ZIP_CMD_CTL : 0x%016llx",
                zip_reg_read(zip->reg_base + ZIP_CMD_CTL));

        constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
        zip->depth   = constants.s.depth;
        zip->onfsize = constants.s.onfsize;
        zip->ctxsize = constants.s.ctxsize;

        zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
                zip->depth, zip->onfsize, zip->ctxsize);

        /*
         * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
         * have the correct buffer pointer and size configured for each
         * instruction queue.
         */
        for (q = 0; q < ZIP_NUM_QUEUES; q++) {
                que_sbuf_ctl.u_reg64 = 0ull;
                que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
                que_sbuf_ctl.s.inst_be = 0;
                que_sbuf_ctl.s.stream_id = 0;
                zip_reg_write(que_sbuf_ctl.u_reg64,
                              (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));

                zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
                        zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
        }

        for (q = 0; q < ZIP_NUM_QUEUES; q++) {
                memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));

                spin_lock_init(&zip->iq[q].lock);

                if (zip_cmd_qbuf_alloc(zip, q)) {
                        while (q != 0) {
                                q--;
                                zip_cmd_qbuf_free(zip, q);
                        }
                        return -ENOMEM;
                }

                /* Initialize tail ptr to head */
                zip->iq[q].sw_tail = zip->iq[q].sw_head;
                zip->iq[q].hw_tail = zip->iq[q].sw_head;

                /* Write the physical addr to register */
                que_sbuf_addr.u_reg64 = 0ull;
                que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
                                       ZIP_128B_ALIGN);

                zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
                        (u64)que_sbuf_addr.s.ptr);

                zip_reg_write(que_sbuf_addr.u_reg64,
                              (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

                zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
                        zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

                zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
                        zip->iq[q].sw_head, zip->iq[q].sw_tail,
                        zip->iq[q].hw_tail);
                zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
        }

        /*
         * Queue-to-ZIP core mapping
         * If a queue is not mapped to a particular core, it is equivalent to
         * the ZIP core being disabled.
         */
        que_ena.u_reg64 = 0x0ull;
        /* Enabling queues based on ZIP_NUM_QUEUES */
        for (q = 0; q < ZIP_NUM_QUEUES; q++)
                que_ena.s.ena |= (0x1 << q);
        zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));

        zip_msg("QUE_ENA : 0x%016llx",
                zip_reg_read(zip->reg_base + ZIP_QUE_ENA));

        for (q = 0; q < ZIP_NUM_QUEUES; q++) {
                que_map.u_reg64 = 0ull;
                /* Mapping each queue to two ZIP cores */
                que_map.s.zce = 0x3;
                zip_reg_write(que_map.u_reg64,
                              (zip->reg_base + ZIP_QUEX_MAP(q)));

                zip_msg("QUE_MAP(%d) : 0x%016llx", q,
                        zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
        }

        que_pri.u_reg64 = 0ull;
        for (q = 0; q < ZIP_NUM_QUEUES; q++)
                que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
        zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));

        zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));

        return 0;
}
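
/* Asserts the reset bit in ZIP_CMD_CTL, forcing the ZIP cores back to a known state */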
static void zip_reset(struct zip_device *zip)
{
        union zip_cmd_ctl cmd_ctl;

        cmd_ctl.u_reg64 = 0x0ull;
        cmd_ctl.s.reset = 1;  /* Forces ZIP cores to do reset */
        zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
}
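
/*
 * PCI probe: enables the device, maps the BAR0 CSR space, initializes the
 * ZIP hardware, registers the compression algorithms with the kernel crypto
 * API and creates the debugfs entries.
 */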
static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct zip_device *zip = NULL;
        int err;

        zip = zip_alloc_device(pdev);
        if (!zip)
                return -ENOMEM;

        dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
                 pdev->vendor, pdev->device, dev_to_node(dev));

        pci_set_drvdata(pdev, zip);
        zip->pdev = pdev;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device");
                goto err_free_device;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x", err);
                goto err_disable_device;
        }

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
                goto err_release_regions;
        }

        /* MAP configuration registers */
        zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
        if (!zip->reg_base) {
                dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
                err = -ENOMEM;
                goto err_release_regions;
        }

        /* Initialize ZIP Hardware */
        err = zip_init_hw(zip);
        if (err)
                goto err_release_regions;

        /* Register with the Kernel Crypto Interface */
        err = zip_register_compression_device();
        if (err < 0) {
                zip_err("ZIP: Kernel Crypto Registration failed\n");
                goto err_register;
        }

        /* comp-decomp statistics are handled with debugfs interface */
        zip_debugfs_init();

        return 0;

err_register:
        zip_reset(zip);

err_release_regions:
        if (zip->reg_base)
                iounmap(zip->reg_base);
        pci_release_regions(pdev);

err_disable_device:
        pci_disable_device(pdev);

err_free_device:
        pci_set_drvdata(pdev, NULL);

        /* Remove zip_dev from zip_device list, free the zip_device memory */
        zip_dev[zip->index] = NULL;
        devm_kfree(dev, zip);

        return err;
}
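
/*
 * PCI remove: undoes zip_probe() - removes the debugfs entries, unregisters
 * the compression algorithms, resets the hardware and releases the PCI and
 * command-queue resources.
 */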
static void zip_remove(struct pci_dev *pdev)
{
        struct zip_device *zip = pci_get_drvdata(pdev);
        int q = 0;

        if (!zip)
                return;

        zip_debugfs_exit();

        zip_unregister_compression_device();

        if (zip->reg_base) {
                zip_reset(zip);
                iounmap(zip->reg_base);
        }

        pci_release_regions(pdev);
        pci_disable_device(pdev);

        /*
         * Free Command Queue buffers. This free should be called for all
         * the enabled Queues.
         */
        for (q = 0; q < ZIP_NUM_QUEUES; q++)
                zip_cmd_qbuf_free(zip, q);

        pci_set_drvdata(pdev, NULL);
        /* remove zip device from zip device list */
        zip_dev[zip->index] = NULL;
}
/* PCI Sub-System Interface */
static struct pci_driver zip_driver = {
        .name     = DRV_NAME,
        .id_table = zip_id_table,
        .probe    = zip_probe,
        .remove   = zip_remove,
};
/* Kernel Crypto Subsystem Interface */
static struct crypto_alg zip_comp_deflate = {
        .cra_name        = "deflate",
        .cra_driver_name = "deflate-cavium",
        .cra_flags       = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize     = sizeof(struct zip_kernel_ctx),
        .cra_priority    = 300,
        .cra_module      = THIS_MODULE,
        .cra_init        = zip_alloc_comp_ctx_deflate,
        .cra_exit        = zip_free_comp_ctx,
        .cra_u           = { .compress = {
                .coa_compress   = zip_comp_compress,
                .coa_decompress = zip_comp_decompress
        } }
};

static struct crypto_alg zip_comp_lzs = {
        .cra_name        = "lzs",
        .cra_driver_name = "lzs-cavium",
        .cra_flags       = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize     = sizeof(struct zip_kernel_ctx),
        .cra_priority    = 300,
        .cra_module      = THIS_MODULE,
        .cra_init        = zip_alloc_comp_ctx_lzs,
        .cra_exit        = zip_free_comp_ctx,
        .cra_u           = { .compress = {
                .coa_compress   = zip_comp_compress,
                .coa_decompress = zip_comp_decompress
        } }
};

static struct scomp_alg zip_scomp_deflate = {
        .alloc_ctx  = zip_alloc_scomp_ctx_deflate,
        .free_ctx   = zip_free_scomp_ctx,
        .compress   = zip_scomp_compress,
        .decompress = zip_scomp_decompress,
        .base       = {
                .cra_name        = "deflate",
                .cra_driver_name = "deflate-scomp-cavium",
                .cra_module      = THIS_MODULE,
                .cra_priority    = 300,
        }
};

static struct scomp_alg zip_scomp_lzs = {
        .alloc_ctx  = zip_alloc_scomp_ctx_lzs,
        .free_ctx   = zip_free_scomp_ctx,
        .compress   = zip_scomp_compress,
        .decompress = zip_scomp_decompress,
        .base       = {
                .cra_name        = "lzs",
                .cra_driver_name = "lzs-scomp-cavium",
                .cra_module      = THIS_MODULE,
                .cra_priority    = 300,
        }
};
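
/*
 * Registers the deflate and lzs algorithms with both the legacy compression
 * API (crypto_alg) and the scomp API, unwinding any already-registered
 * algorithms on failure.
 */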
static int zip_register_compression_device(void)
{
        int ret;

        ret = crypto_register_alg(&zip_comp_deflate);
        if (ret < 0) {
                zip_err("Deflate algorithm registration failed\n");
                return ret;
        }

        ret = crypto_register_alg(&zip_comp_lzs);
        if (ret < 0) {
                zip_err("LZS algorithm registration failed\n");
                goto err_unregister_alg_deflate;
        }

        ret = crypto_register_scomp(&zip_scomp_deflate);
        if (ret < 0) {
                zip_err("Deflate scomp algorithm registration failed\n");
                goto err_unregister_alg_lzs;
        }

        ret = crypto_register_scomp(&zip_scomp_lzs);
        if (ret < 0) {
                zip_err("LZS scomp algorithm registration failed\n");
                goto err_unregister_scomp_deflate;
        }
        return ret;

err_unregister_scomp_deflate:
        crypto_unregister_scomp(&zip_scomp_deflate);
err_unregister_alg_lzs:
        crypto_unregister_alg(&zip_comp_lzs);
err_unregister_alg_deflate:
        crypto_unregister_alg(&zip_comp_deflate);
        return ret;
}

static void zip_unregister_compression_device(void)
{
        crypto_unregister_alg(&zip_comp_deflate);
        crypto_unregister_alg(&zip_comp_lzs);
        crypto_unregister_scomp(&zip_scomp_deflate);
        crypto_unregister_scomp(&zip_scomp_lzs);
}
/*
 * debugfs functions
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

/* Displays ZIP device statistics */
static int zip_stats_show(struct seq_file *s, void *unused)
{
        u64 val = 0ull;
        u64 avg_chunk = 0ull, avg_cr = 0ull;
        u32 q = 0;
        int index = 0;
        struct zip_device *zip;
        struct zip_stats *st;

        for (index = 0; index < MAX_ZIP_DEVICES; index++) {
                u64 pending = 0;

                if (zip_dev[index]) {
                        zip = zip_dev[index];
                        st = &zip->stats;

                        /* Get all the pending requests */
                        for (q = 0; q < ZIP_NUM_QUEUES; q++) {
                                val = zip_reg_read((zip->reg_base +
                                                    ZIP_DBG_QUEX_STA(q)));
                                pending += val >> 32 & 0xffffff;
                        }

                        val = atomic64_read(&st->comp_req_complete);
                        avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;

                        val = atomic64_read(&st->comp_out_bytes);
                        avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;

                        seq_printf(s, " ZIP Device %d Stats\n"
                                   "-----------------------------------\n"
                                   "Comp Req Submitted : \t%lld\n"
                                   "Comp Req Completed : \t%lld\n"
                                   "Compress In Bytes : \t%lld\n"
                                   "Compressed Out Bytes : \t%lld\n"
                                   "Average Chunk size : \t%llu\n"
                                   "Average Compression ratio : \t%llu\n"
                                   "Decomp Req Submitted : \t%lld\n"
                                   "Decomp Req Completed : \t%lld\n"
                                   "Decompress In Bytes : \t%lld\n"
                                   "Decompressed Out Bytes : \t%lld\n"
                                   "Decompress Bad requests : \t%lld\n"
                                   "Pending Req : \t%lld\n"
                                   "---------------------------------\n",
                                   index,
                                   (u64)atomic64_read(&st->comp_req_submit),
                                   (u64)atomic64_read(&st->comp_req_complete),
                                   (u64)atomic64_read(&st->comp_in_bytes),
                                   (u64)atomic64_read(&st->comp_out_bytes),
                                   avg_chunk,
                                   avg_cr,
                                   (u64)atomic64_read(&st->decomp_req_submit),
                                   (u64)atomic64_read(&st->decomp_req_complete),
                                   (u64)atomic64_read(&st->decomp_in_bytes),
                                   (u64)atomic64_read(&st->decomp_out_bytes),
                                   (u64)atomic64_read(&st->decomp_bad_reqs),
                                   pending);
                }
        }
        return 0;
}
/* Clears stats data */
static int zip_clear_show(struct seq_file *s, void *unused)
{
        int index = 0;

        for (index = 0; index < MAX_ZIP_DEVICES; index++) {
                if (zip_dev[index]) {
                        memset(&zip_dev[index]->stats, 0,
                               sizeof(struct zip_stats));
                        seq_printf(s, "Cleared stats for zip %d\n", index);
                }
        }

        return 0;
}
static struct zip_registers zipregs[64] = {
        {"ZIP_CMD_CTL        ", 0x0000ull},
        {"ZIP_THROTTLE       ", 0x0010ull},
        {"ZIP_CONSTANTS      ", 0x00A0ull},
        {"ZIP_QUE0_MAP       ", 0x1400ull},
        {"ZIP_QUE1_MAP       ", 0x1408ull},
        {"ZIP_QUE_ENA        ", 0x0500ull},
        {"ZIP_QUE_PRI        ", 0x0508ull},
        {"ZIP_QUE0_DONE      ", 0x2000ull},
        {"ZIP_QUE1_DONE      ", 0x2008ull},
        {"ZIP_QUE0_DOORBELL  ", 0x4000ull},
        {"ZIP_QUE1_DOORBELL  ", 0x4008ull},
        {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
        {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
        {"ZIP_QUE0_SBUF_CTL  ", 0x1200ull},
        {"ZIP_QUE1_SBUF_CTL  ", 0x1208ull},
        { NULL, 0}
};
/* Prints registers' contents */
static int zip_regs_show(struct seq_file *s, void *unused)
{
        u64 val = 0;
        int i = 0, index = 0;

        for (index = 0; index < MAX_ZIP_DEVICES; index++) {
                if (zip_dev[index]) {
                        seq_printf(s, "--------------------------------\n"
                                   " ZIP Device %d Registers\n"
                                   "--------------------------------\n",
                                   index);

                        i = 0;

                        while (zipregs[i].reg_name) {
                                val = zip_reg_read((zip_dev[index]->reg_base +
                                                    zipregs[i].reg_offset));
                                seq_printf(s, "%s: 0x%016llx\n",
                                           zipregs[i].reg_name, val);
                                i++;
                        }
                }
        }
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(zip_stats);
DEFINE_SHOW_ATTRIBUTE(zip_clear);
DEFINE_SHOW_ATTRIBUTE(zip_regs);

/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
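
/* Sets up the thunderx_zip debugfs directory with zip_stats, zip_clear and zip_regs entries */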
static void zip_debugfs_init(void)
{
        if (!debugfs_initialized())
                return;

        zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);

        /* Creating files for entries inside thunderx_zip directory */
        debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
                            &zip_stats_fops);

        debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
                            &zip_clear_fops);

        debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
                            &zip_regs_fops);
}

static void zip_debugfs_exit(void)
{
        debugfs_remove_recursive(zip_debugfs_root);
}

#else
/*
 * These stubs are called from the probe/remove paths, not only at module
 * init/exit, so they must not carry __init/__exit annotations.
 */
static void zip_debugfs_init(void) { }
static void zip_debugfs_exit(void) { }
#endif
/* debugfs - end */

module_pci_driver(zip_driver);

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, zip_id_table);