ism_drv.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>
#include <net/smc.h>
#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;
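
/*
 * Issue a command to the ISM device: write the request payload followed by
 * the request header, preset the response return code to ISM_ERROR, then
 * read back the response header and, if the command succeeded, the
 * response payload. Returns the device's response return code.
 */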
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	return resp->ret;
}
static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}
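
/*
 * Allocate a DMA-coherent page for the SBA (the interrupt status area that
 * holds the summary/event bits and the per-DMB notification bits, see
 * ism_handle_irq()) and register it with the device.
 */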
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}
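
/*
 * Allocate a DMA-coherent page for the event queue (IEQ) and register it
 * with the device; incoming events land here and are forwarded to the
 * SMC-D layer by ism_handle_event().
 */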
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}
static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}
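
/* Query the device for its own GID and store it in the smcd device. */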
static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}

static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}
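
/*
 * DMB (direct memory buffer) helpers: a DMB is a DMA-coherent receive
 * buffer whose slot index (sba_idx) is tracked in ism->sba_bitmap.
 */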
static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN |
					   __GFP_NOMEMALLOC | __GFP_COMP |
					   __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}
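
/*
 * Allocate a DMB and register it with the device; on success the device
 * returns a DMB token that is stored in dmb->dmb_tok.
 */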
static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}

static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}
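
/*
 * ism_move() copies data into a remote DMB in chunks that never cross a
 * page boundary; max_bytes() computes how many bytes fit before the next
 * boundary. The signal flag (sf) is only applied to the final chunk.
 */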
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
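
/*
 * The system EID (SEID) identifies this machine to SMC-Dv2 peers; its
 * serial number and type fields are filled in from the CPU id when the
 * hardware turns out to be V2 capable (see ism_dev_init()).
 */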
static struct ism_systemeid SYSTEM_EID = {
	.seid_string = "IBM-SYSZ-ISMSEID00000000",
	.serial_number = "0000",
	.type = "0000",
};

static void ism_create_system_eid(void)
{
	struct cpuid id;
	u16 ident_tail;
	char tmp[5];

	get_cpu_id(&id);
	ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
	snprintf(tmp, 5, "%04X", ident_tail);
	memcpy(&SYSTEM_EID.serial_number, tmp, 4);
	snprintf(tmp, 5, "%04X", id.machine);
	memcpy(&SYSTEM_EID.type, tmp, 4);
}

static u8 *ism_get_system_eid(void)
{
	return SYSTEM_EID.seid_string;
}

static u16 ism_get_chid(struct smcd_dev *smcd)
{
	struct ism_dev *ism = (struct ism_dev *)smcd->priv;

	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}
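
/* Deliver all new entries from the event queue to the SMC-D layer. */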
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}
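
/*
 * MSI handler: clear the summary bit, walk the pending DMB notification
 * bits in the SBA and forward each one to the SMC-D layer, then process
 * queued events if the event bit is set.
 */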
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;
	u16 dmbemask;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET, dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}
static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
	.get_system_eid = ism_get_system_eid,
	.get_chid = ism_get_chid,
};
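
/*
 * Device bring-up: allocate the MSI vector, request the interrupt,
 * register SBA and IEQ, read the local GID, probe for SMC-Dv2 capability
 * via the reserved VLAN id, and finally register with the SMC-D layer.
 */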
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_create_system_eid();

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}
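
/*
 * PCI probe: enable the device, claim its resources, configure DMA
 * (64-bit mask, 1 MB segment limit), allocate the smcd device and run
 * the ISM-specific initialization.
 */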
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_resource;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	if (SYSTEM_EID.serial_number[0] != '0' ||
	    SYSTEM_EID.type[0] != '0')
		ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static struct pci_driver ism_driver = {
	.name = DRV_NAME,
	.id_table = ism_device_table,
	.probe = ism_probe,
	.remove = ism_remove,
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);
	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);