eni_vdpa.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for Alibaba ENI (Elastic Network Interface)
 *
 * Copyright (c) 2021, Alibaba Inc. All rights reserved.
 * Author: Wu Zongyong <[email protected]>
 *
 */

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_legacy.h>
#include <uapi/linux/virtio_net.h>

#define ENI_MSIX_NAME_SIZE 256

#define ENI_ERR(pdev, fmt, ...) \
	dev_err(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_DBG(pdev, fmt, ...) \
	dev_dbg(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
#define ENI_INFO(pdev, fmt, ...) \
	dev_info(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)

struct eni_vring {
	void __iomem *notify;
	char msix_name[ENI_MSIX_NAME_SIZE];
	struct vdpa_callback cb;
	int irq;
};

struct eni_vdpa {
	struct vdpa_device vdpa;
	struct virtio_pci_legacy_device ldev;
	struct eni_vring *vring;
	struct vdpa_callback config_cb;
	char msix_name[ENI_MSIX_NAME_SIZE];
	int config_irq;
	int queues;
	int vectors;
};

static struct eni_vdpa *vdpa_to_eni(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct eni_vdpa, vdpa);
}

static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	return &eni_vdpa->ldev;
}

static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u64 features = vp_legacy_get_features(ldev);

	features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
	features |= BIT_ULL(VIRTIO_F_ORDER_PLATFORM);

	return features;
}

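/*
 * ENI only works when mergeable RX buffers are in use: reject any
 * non-empty feature set that lacks VIRTIO_NET_F_MRG_RXBUF (an empty
 * set is still accepted so the device can pass through reset).
 */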
static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	if (!(features & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF)) && features) {
		ENI_ERR(ldev->pci_dev,
			"VIRTIO_NET_F_MRG_RXBUF is not negotiated\n");
		return -EINVAL;
	}

	vp_legacy_set_features(ldev, (u32)features);

	return 0;
}

static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_driver_features(ldev);
}

static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_status(ldev);
}

static int eni_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	int irq = eni_vdpa->vring[idx].irq;

	if (irq == VIRTIO_MSI_NO_VECTOR)
		return -EINVAL;

	return irq;
}

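/*
 * Teardown mirrors setup: detach each vector from its queue (and from
 * the config space) before freeing the Linux irq, then release the
 * MSI-X vectors themselves.
 */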
static void eni_vdpa_free_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i;

	for (i = 0; i < eni_vdpa->queues; i++) {
		if (eni_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
			vp_legacy_queue_vector(ldev, i, VIRTIO_MSI_NO_VECTOR);
			devm_free_irq(&pdev->dev, eni_vdpa->vring[i].irq,
				      &eni_vdpa->vring[i]);
			eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		}
	}

	if (eni_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
		vp_legacy_config_vector(ldev, VIRTIO_MSI_NO_VECTOR);
		devm_free_irq(&pdev->dev, eni_vdpa->config_irq, eni_vdpa);
		eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
	}

	if (eni_vdpa->vectors) {
		pci_free_irq_vectors(pdev);
		eni_vdpa->vectors = 0;
	}
}

static irqreturn_t eni_vdpa_vq_handler(int irq, void *arg)
{
	struct eni_vring *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t eni_vdpa_config_handler(int irq, void *arg)
{
	struct eni_vdpa *eni_vdpa = arg;

	if (eni_vdpa->config_cb.callback)
		return eni_vdpa->config_cb.callback(eni_vdpa->config_cb.private);

	return IRQ_HANDLED;
}

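/*
 * One MSI-X vector per virtqueue plus one for config changes: vector i
 * services vq i, and vector 'queues' is routed to the config handler.
 * The allocation is all-or-nothing (min == max == queues + 1).
 */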
static int eni_vdpa_request_irq(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	struct pci_dev *pdev = ldev->pci_dev;
	int i, ret, irq;
	int queues = eni_vdpa->queues;
	int vectors = queues + 1;

	ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
	if (ret != vectors) {
		ENI_ERR(pdev,
			"failed to allocate irq vectors: want %d but got %d\n",
			vectors, ret);
		return ret;
	}

	eni_vdpa->vectors = vectors;

	for (i = 0; i < queues; i++) {
		snprintf(eni_vdpa->vring[i].msix_name, ENI_MSIX_NAME_SIZE,
			 "eni-vdpa[%s]-%d", pci_name(pdev), i);
		irq = pci_irq_vector(pdev, i);
		ret = devm_request_irq(&pdev->dev, irq,
				       eni_vdpa_vq_handler,
				       0, eni_vdpa->vring[i].msix_name,
				       &eni_vdpa->vring[i]);
		if (ret) {
			ENI_ERR(pdev, "failed to request irq for vq %d\n", i);
			goto err;
		}
		vp_legacy_queue_vector(ldev, i, i);
		eni_vdpa->vring[i].irq = irq;
	}

	snprintf(eni_vdpa->msix_name, ENI_MSIX_NAME_SIZE, "eni-vdpa[%s]-config",
		 pci_name(pdev));
	irq = pci_irq_vector(pdev, queues);
	ret = devm_request_irq(&pdev->dev, irq, eni_vdpa_config_handler, 0,
			       eni_vdpa->msix_name, eni_vdpa);
	if (ret) {
		ENI_ERR(pdev, "failed to request irq for config change\n");
		goto err;
	}
	vp_legacy_config_vector(ldev, queues);
	eni_vdpa->config_irq = irq;

	return 0;
err:
	eni_vdpa_free_irq(eni_vdpa);
	return ret;
}

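/*
 * Interrupts follow the DRIVER_OK lifecycle: request them on the
 * transition into DRIVER_OK and free them again when DRIVER_OK is
 * cleared (or on reset), so vectors are only held while the device
 * is live.
 */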
static void eni_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
	    !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
		eni_vdpa_request_irq(eni_vdpa);
	}

	vp_legacy_set_status(ldev, status);

	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    (s & VIRTIO_CONFIG_S_DRIVER_OK))
		eni_vdpa_free_irq(eni_vdpa);
}

static int eni_vdpa_reset(struct vdpa_device *vdpa)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u8 s = eni_vdpa_get_status(vdpa);

	vp_legacy_set_status(ldev, 0);

	if (s & VIRTIO_CONFIG_S_DRIVER_OK)
		eni_vdpa_free_irq(eni_vdpa);

	return 0;
}

static u16 eni_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_size(ldev, 0);
}

static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
				 struct vdpa_vq_state *state)
{
	return -EOPNOTSUPP;
}

static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	const struct vdpa_vq_state_split *split = &state->split;

	/* ENI is built upon the virtio-pci specification, which does not
	 * support setting the state of a virtqueue. But if the requested
	 * state happens to equal the device's initial state, we can let
	 * it go.
	 */
	if (!vp_legacy_get_queue_enable(ldev, qid)
	    && split->avail_index == 0)
		return 0;

	return -EOPNOTSUPP;
}

static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
			       struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->vring[qid].cb = *cb;
}

static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,
				  bool ready)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	/* ENI is a legacy virtio-pci device, and disabling a virtqueue
	 * is not supported by the specification. But we can disable the
	 * queue by setting its address to 0.
	 */
	if (!ready)
		vp_legacy_set_queue_address(ldev, qid, 0);
}

static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return vp_legacy_get_queue_enable(ldev, qid);
}

static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
				u32 num)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	struct pci_dev *pdev = ldev->pci_dev;
	u16 n = vp_legacy_get_queue_size(ldev, qid);

	/* ENI is a legacy virtio-pci device, which does not allow the
	 * virtqueue size to be changed. Just report an error if someone
	 * tries to change it.
	 */
	if (num != n)
		ENI_ERR(pdev,
			"cannot change the size of vq %u from the fixed %u to %u\n",
			qid, n, num);
}

static int eni_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
				   u64 desc_area, u64 driver_area,
				   u64 device_area)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
	u32 pfn = desc_area >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	vp_legacy_set_queue_address(ldev, qid, pfn);

	return 0;
}

static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	iowrite16(qid, eni_vdpa->vring[qid].notify);
}

static u32 eni_vdpa_get_device_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.device;
}

static u32 eni_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
	struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);

	return ldev->id.vendor;
}

static u32 eni_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
	return VIRTIO_PCI_VRING_ALIGN;
}

static size_t eni_vdpa_get_config_size(struct vdpa_device *vdpa)
{
	return sizeof(struct virtio_net_config);
}

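/*
 * In the legacy layout, the device-specific config space starts right
 * after the common header, whose size depends on whether MSI-X is
 * enabled; VIRTIO_PCI_CONFIG_OFF() picks the right base, treating a
 * non-zero vector count as "MSI-X on".
 */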
static void eni_vdpa_get_config(struct vdpa_device *vdpa,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
		VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
		offset;
	u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		*p++ = ioread8(ioaddr + i);
}

static void eni_vdpa_set_config(struct vdpa_device *vdpa,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	void __iomem *ioaddr = ldev->ioaddr +
		VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
		offset;
	const u8 *p = buf;
	int i;

	for (i = 0; i < len; i++)
		iowrite8(*p++, ioaddr + i);
}

static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
				   struct vdpa_callback *cb)
{
	struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);

	eni_vdpa->config_cb = *cb;
}

static const struct vdpa_config_ops eni_vdpa_ops = {
	.get_device_features = eni_vdpa_get_device_features,
	.set_driver_features = eni_vdpa_set_driver_features,
	.get_driver_features = eni_vdpa_get_driver_features,
	.get_status	= eni_vdpa_get_status,
	.set_status	= eni_vdpa_set_status,
	.reset		= eni_vdpa_reset,
	.get_vq_num_max	= eni_vdpa_get_vq_num_max,
	.get_vq_num_min	= eni_vdpa_get_vq_num_min,
	.get_vq_state	= eni_vdpa_get_vq_state,
	.set_vq_state	= eni_vdpa_set_vq_state,
	.set_vq_cb	= eni_vdpa_set_vq_cb,
	.set_vq_ready	= eni_vdpa_set_vq_ready,
	.get_vq_ready	= eni_vdpa_get_vq_ready,
	.set_vq_num	= eni_vdpa_set_vq_num,
	.set_vq_address	= eni_vdpa_set_vq_address,
	.kick_vq	= eni_vdpa_kick_vq,
	.get_device_id	= eni_vdpa_get_device_id,
	.get_vendor_id	= eni_vdpa_get_vendor_id,
	.get_vq_align	= eni_vdpa_get_vq_align,
	.get_config_size = eni_vdpa_get_config_size,
	.get_config	= eni_vdpa_get_config,
	.set_config	= eni_vdpa_set_config,
	.set_config_cb	= eni_vdpa_set_config_cb,
	.get_vq_irq	= eni_vdpa_get_vq_irq,
};

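/*
 * A virtio-net device has one RX/TX queue pair by default; with
 * VIRTIO_NET_F_MQ the pair count instead comes from
 * max_virtqueue_pairs in the config space, and VIRTIO_NET_F_CTRL_VQ
 * adds one control queue on top.
 */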
static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
{
	struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
	u32 features = vp_legacy_get_features(ldev);
	u16 num = 2;

	if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
		__virtio16 max_virtqueue_pairs;

		eni_vdpa_get_config(&eni_vdpa->vdpa,
			offsetof(struct virtio_net_config, max_virtqueue_pairs),
			&max_virtqueue_pairs,
			sizeof(max_virtqueue_pairs));
		num = 2 * __virtio16_to_cpu(virtio_legacy_is_little_endian(),
				max_virtqueue_pairs);
	}

	if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		num += 1;

	return num;
}

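/*
 * Probe keeps everything device-managed (pcim_/devm_) so partial
 * failures unwind automatically. Note that every virtqueue shares the
 * same notify address: the legacy transport has a single
 * VIRTIO_PCI_QUEUE_NOTIFY register, and kick_vq() selects the queue
 * by writing its index.
 */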
static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct eni_vdpa *eni_vdpa;
	struct virtio_pci_legacy_device *ldev;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
				     dev, &eni_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(eni_vdpa)) {
		ENI_ERR(pdev, "failed to allocate vDPA structure\n");
		return PTR_ERR(eni_vdpa);
	}

	ldev = &eni_vdpa->ldev;
	ldev->pci_dev = pdev;

	ret = vp_legacy_probe(ldev);
	if (ret) {
		ENI_ERR(pdev, "failed to probe legacy PCI device\n");
		goto err;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, eni_vdpa);

	eni_vdpa->vdpa.dma_dev = &pdev->dev;
	eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);

	eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
				       sizeof(*eni_vdpa->vring),
				       GFP_KERNEL);
	if (!eni_vdpa->vring) {
		ret = -ENOMEM;
		ENI_ERR(pdev, "failed to allocate virtqueues\n");
		goto err;
	}

	for (i = 0; i < eni_vdpa->queues; i++) {
		eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
		eni_vdpa->vring[i].notify = ldev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
	}
	eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

	ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
	if (ret) {
		ENI_ERR(pdev, "failed to register to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&eni_vdpa->vdpa.dev);
	return ret;
}

static void eni_vdpa_remove(struct pci_dev *pdev)
{
	struct eni_vdpa *eni_vdpa = pci_get_drvdata(pdev);

	vdpa_unregister_device(&eni_vdpa->vdpa);
	vp_legacy_remove(&eni_vdpa->ldev);
}

static struct pci_device_id eni_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_ID_NET) },
	{ 0 },
};

static struct pci_driver eni_vdpa_driver = {
	.name		= "alibaba-eni-vdpa",
	.id_table	= eni_pci_ids,
	.probe		= eni_vdpa_probe,
	.remove		= eni_vdpa_remove,
};

module_pci_driver(eni_vdpa_driver);

MODULE_AUTHOR("Wu Zongyong <[email protected]>");
MODULE_DESCRIPTION("Alibaba ENI vDPA driver");
MODULE_LICENSE("GPL v2");