ifcvf_main.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <[email protected]>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR		"Intel Corporation"
#define IFCVF_DRIVER_NAME	"ifcvf"
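
/* Config-change interrupt handler: forward to the callback registered via set_config_cb */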
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	struct vring_info *vring;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vring = &vf->vring[i];
		if (vring->cb.callback)
			vring->cb.callback(vring->cb.private);
	}

	return IRQ_HANDLED;
}
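
/*
 * Device-shared interrupt handler: a single vector services both
 * config-change events and all virtqueues.
 */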
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	u8 isr;

	isr = vp_ioread8(vf->isr);
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		ifcvf_config_changed(irq, arg);

	return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		if (vf->vring[i].irq != -EINVAL) {
			devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
			vf->vring[i].irq = -EINVAL;
		}
	}
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	if (vf->vqs_reused_irq != -EINVAL) {
		devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
		vf->vqs_reused_irq = -EINVAL;
	}
}

static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
{
	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ifcvf_free_per_vq_irq(vf);
	else
		ifcvf_free_vqs_reused_irq(vf);
}

static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	if (vf->config_irq == -EINVAL)
		return;

	/* If the irq is shared by all vqs and the config interrupt,
	 * it is already freed in ifcvf_free_vq_irq, so we only need to
	 * free the config irq here when msix_vector_status != MSIX_VECTOR_DEV_SHARED
	 */
	if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
		devm_free_irq(&pdev->dev, vf->config_irq, vf);
		vf->config_irq = -EINVAL;
	}
}

static void ifcvf_free_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;

	ifcvf_free_vq_irq(vf);
	ifcvf_free_config_irq(vf);
	ifcvf_free_irq_vectors(pdev);
}

/* ifcvf MSIX vectors allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors, or a negative
 * value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int max_intr, ret;

	/* all queues and config interrupt */
	max_intr = vf->nr_vring + 1;
	ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	if (ret < max_intr)
		IFCVF_INFO(pdev,
			   "Requested %u vectors, however only %u allocated, lower performance\n",
			   max_intr, ret);

	return ret;
}
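
/* One MSI-X vector per virtqueue; vector nr_vring is left for the config interrupt */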
static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vf->vqs_reused_irq = -EINVAL;
	for (i = 0; i < vf->nr_vring; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
		vector = i;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_vq_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
			goto err;
		}

		vf->vring[i].irq = irq;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}
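
/* All virtqueues share MSI-X vector 0; vector 1 is reserved for the config interrupt */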
static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_vqs_reused_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}
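
/* Last-resort layout: a single MSI-X vector (0) services all virtqueues and the config interrupt */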
static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int i, vector, ret, irq;

	vector = 0;
	snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
	irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, irq,
			       ifcvf_dev_intr_handler, 0,
			       vf->vring[0].msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request irq for the device\n");
		goto err;
	}

	vf->vqs_reused_irq = irq;
	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].irq = -EINVAL;
		ret = ifcvf_set_vq_vector(vf, i, vector);
		if (ret == VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
			goto err;
		}
	}

	vf->config_irq = irq;
	ret = ifcvf_set_config_vector(vf, vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
{
	int ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		ret = ifcvf_request_per_vq_irq(vf);
	else
		ret = ifcvf_request_vqs_reused_irq(vf);

	return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
{
	struct pci_dev *pdev = vf->pdev;
	int config_vector, ret;

	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
		config_vector = vf->nr_vring;
	else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
		/* vector 0 for vqs and 1 for config interrupt */
		config_vector = 1;
	else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
		/* re-use the vqs vector */
		return 0;
	else
		return -EINVAL;

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
		 pci_name(pdev));
	vf->config_irq = pci_irq_vector(pdev, config_vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		goto err;
	}

	ret = ifcvf_set_config_vector(vf, config_vector);
	if (ret == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(pdev, "No msix vector for device config\n");
		goto err;
	}

	return 0;
err:
	ifcvf_free_irq(vf);

	return -EFAULT;
}
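
/*
 * Allocate MSI-X vectors and pick an interrupt layout: one vector per vq
 * plus a config vector when enough vectors are available, a shared vq
 * vector plus a config vector when short, and a single device-shared
 * vector as the last resort.
 */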
static int ifcvf_request_irq(struct ifcvf_hw *vf)
{
	int nvectors, ret, max_intr;

	nvectors = ifcvf_alloc_vectors(vf);
	if (nvectors <= 0)
		return -EFAULT;

	vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
	max_intr = vf->nr_vring + 1;
	if (nvectors < max_intr)
		vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

	if (nvectors == 1) {
		vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
		ret = ifcvf_request_dev_irq(vf);

		return ret;
	}

	ret = ifcvf_request_vq_irq(vf);
	if (ret)
		return ret;

	ret = ifcvf_request_config_irq(vf);
	if (ret)
		return ret;

	return 0;
}

static int ifcvf_start_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	u8 status;
	int ret;

	ret = ifcvf_start_hw(vf);
	if (ret < 0) {
		status = ifcvf_get_status(vf);
		status |= VIRTIO_CONFIG_S_FAILED;
		ifcvf_set_status(vf, status);
	}

	return ret;
}

static int ifcvf_stop_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	int i;

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].cb.callback = NULL;

	ifcvf_stop_hw(vf);

	return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
	int i;

	for (i = 0; i < vf->nr_vring; i++) {
		vf->vring[i].last_avail_idx = 0;
		vf->vring[i].desc = 0;
		vf->vring[i].avail = 0;
		vf->vring[i].used = 0;
		vf->vring[i].ready = 0;
		vf->vring[i].cb.callback = NULL;
		vf->vring[i].cb.private = NULL;
	}

	ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	u32 type = vf->dev_type;
	u64 features;

	if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
		features = ifcvf_get_features(vf);
	else {
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	vf->req_features = features;

	return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->req_features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}
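
/* On the DRIVER_OK transition, request irqs and start the datapath; mark the device FAILED on error */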
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(vf);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}
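
/* Stop the datapath and free irqs if the device was running, then clear the vring state */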
static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = vdpa_to_adapter(vdpa_dev);
	status_old = ifcvf_get_status(vf);

	if (status_old == 0)
		return 0;

	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(vf);
	}

	ifcvf_reset_vring(adapter);

	return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	if (vf->vqs_reused_irq < 0)
		return vf->vring[qid].irq;
	else
		return -EINVAL;
}

static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vf->vring[idx].notify_pa;
	if (!vf->notify_off_multiplier)
		area.size = PAGE_SIZE;
	else
		area.size = vf->notify_off_multiplier;

	return area;
}

/*
 * IFCVF currently doesn't have on-chip IOMMU, so not
 * implemented set_map()/dma_map()/dma_unmap()
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_device_features = ifcvf_vdpa_get_device_features,
	.set_driver_features = ifcvf_vdpa_set_driver_features,
	.get_driver_features = ifcvf_vdpa_get_driver_features,
	.get_status = ifcvf_vdpa_get_status,
	.set_status = ifcvf_vdpa_set_status,
	.reset = ifcvf_vdpa_reset,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_state = ifcvf_vdpa_get_vq_state,
	.set_vq_state = ifcvf_vdpa_set_vq_state,
	.set_vq_cb = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready = ifcvf_vdpa_get_vq_ready,
	.set_vq_num = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq = ifcvf_vdpa_get_vq_irq,
	.kick_vq = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id = ifcvf_vdpa_get_device_id,
	.get_vendor_id = ifcvf_vdpa_get_vendor_id,
	.get_vq_align = ifcvf_vdpa_get_vq_align,
	.get_vq_group = ifcvf_vdpa_get_vq_group,
	.get_config_size = ifcvf_vdpa_get_config_size,
	.get_config = ifcvf_vdpa_get_config,
	.set_config = ifcvf_vdpa_set_config,
	.set_config_cb = ifcvf_vdpa_set_config_cb,
	.get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};

static struct virtio_device_id id_table_blk[] = {
	{VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
	{0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
	u32 dev_type;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
	 * so legacy devices and transitional devices in legacy
	 * mode will not work for vDPA; this driver will not
	 * drive devices with a legacy interface.
	 */
	if (pdev->device < 0x1040)
		dev_type = pdev->subsystem_device;
	else
		dev_type = pdev->device - 0x1040;

	return dev_type;
}
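
/* vdpa management device ops: dev_add allocates the adapter and registers it on the vDPA bus */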
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct ifcvf_adapter *adapter;
	struct vdpa_device *vdpa_dev;
	struct pci_dev *pdev;
	struct ifcvf_hw *vf;
	int ret;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	vf = &ifcvf_mgmt_dev->vf;
	pdev = vf->pdev;
	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return PTR_ERR(adapter);
	}

	ifcvf_mgmt_dev->adapter = adapter;
	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;
	adapter->vdpa.mdev = mdev;
	adapter->vf = vf;
	vdpa_dev = &adapter->vdpa;

	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

	ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
	if (ret) {
		put_device(&adapter->vdpa.dev);
		IFCVF_ERR(pdev, "Failed to register to vDPA bus");
		return ret;
	}

	return 0;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
	_vdpa_unregister_device(dev);
	ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
	.dev_add = ifcvf_vdpa_dev_add,
	.dev_del = ifcvf_vdpa_dev_del
};
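
/* PCI probe: map BARs 0, 2 and 4, initialize the hardware and register the vDPA management device */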
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
	struct device *dev = &pdev->dev;
	struct ifcvf_hw *vf;
	u32 dev_type;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed for adding devres for freeing irq vectors\n");
		return ret;
	}

	pci_set_master(pdev);

	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
	if (!ifcvf_mgmt_dev) {
		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
		return -ENOMEM;
	}

	vf = &ifcvf_mgmt_dev->vf;
	vf->dev_type = get_dev_type(pdev);
	vf->base = pcim_iomap_table(pdev);
	vf->pdev = pdev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < vf->nr_vring; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);
	vf->config_size = ifcvf_get_config_size(vf);

	dev_type = get_dev_type(pdev);
	switch (dev_type) {
	case VIRTIO_ID_NET:
		ifcvf_mgmt_dev->mdev.id_table = id_table_net;
		break;
	case VIRTIO_ID_BLOCK:
		ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
		break;
	default:
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
		ret = -EOPNOTSUPP;
		goto err;
	}

	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
	ifcvf_mgmt_dev->mdev.device = dev;
	ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
	ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;

	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to initialize the management interfaces\n");
		goto err;
	}

	pci_set_drvdata(pdev, ifcvf_mgmt_dev);

	return 0;

err:
	kfree(ifcvf_mgmt_dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

	ifcvf_mgmt_dev = pci_get_drvdata(pdev);
	vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
	kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
	/* N3000 network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 N3000_DEVICE_ID,
			 PCI_VENDOR_ID_INTEL,
			 N3000_SUBSYS_DEVICE_ID) },
	/* C5000X-PL network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_NET) },
	/* C5000X-PL block device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_BLOCK,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_BLOCK) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);
MODULE_LICENSE("GPL v2");