// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2014 Cisco Systems, Inc. All rights reserved.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"

#define VNIC_DVCMD_TMO	10000	/* Devcmd Timeout value */
#define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL
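
/*
 * State for the devcmd2 posted-command channel: commands are written
 * into a work queue (WQ) ring and completions are read back from a
 * separate results ring, matched up by an alternating color bit.
 */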
struct devcmd2_controller {
	struct vnic_wq_ctrl __iomem *wq_ctrl;
	struct vnic_dev_ring results_ring;
	struct vnic_wq wq;
	struct vnic_devcmd2 *cmd_ring;
	struct devcmd2_result *result;
	u16 next_result;
	u16 result_size;
	int color;
};

struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};
struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	u64 args[VNIC_DEVCMD_NARGS];
	struct devcmd2_controller *devcmd2;

	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			  int wait);
};
#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128
void *svnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
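
/*
 * Walk the resource table that firmware exposes at the start of BAR0:
 * validate the header magic/version, then record the mapped address
 * and count of each resource type the device advertises.
 */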
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");

		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");

		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));

		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;
		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len, bar->len);

				return -EINVAL;
			}
			break;

		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;

		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;

	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
				      unsigned int desc_count,
				      unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */
	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);
	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
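
/*
 * The ring is over-allocated by base_align bytes so the descriptor
 * array can be rounded up to the 512-byte boundary the hardware
 * requires; the bus address and the CPU pointer are offset by the
 * same amount so they stay in sync.
 */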
int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	svnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
		ring->size_unaligned, &ring->base_addr_unaligned,
		GFP_KERNEL);
	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);

		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	svnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
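
/*
 * Issue one command over the devcmd2 channel: write the command and
 * its arguments into the next free WQ descriptor, bump posted_index,
 * then (unless the command is fire-and-forget) poll the results ring
 * until the entry's color bit flips to the expected value.
 */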
static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result = NULL;
	unsigned int i;
	int delay;
	int err;
	u32 posted;
	u32 fetch_idx;
	u32 new_posted;
	u8 color;

	fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: return error */
		return -ENODEV;
	}

	posted = ioread32(&dc2c->wq_ctrl->posted_index);
	if (posted == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: return error */
		return -ENODEV;
	}

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
	if (new_posted == fetch_idx) {
		pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted);

		return -EBUSY;
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;
	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	/*
	 * Increment next_result, after posting the devcmd, irrespective of
	 * devcmd result, and it should be done only once.
	 */
	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		if (result->color == color) {
			if (result->error) {
				err = (int) result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));

				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}
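
/*
 * Bring up the devcmd2 channel: allocate the command WQ and the
 * results ring, preserve the hardware's current fetch_index so an
 * already-initialized WQ is not disturbed, and hand the results ring
 * address to firmware via CMD_INITIALIZE_DEVCMD2.
 */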
static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = NULL;
	unsigned int fetch_idx;
	int ret;
	void __iomem *p;

	if (vdev->devcmd2)
		return 0;

	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (!p)
		return -ENODEV;

	dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC);
	if (!dc2c)
		return -ENOMEM;

	vdev->devcmd2 = dc2c;

	dc2c->color = 1;
	dc2c->result_size = DEVCMD2_RING_SIZE;

	ret = vnic_wq_devcmd2_alloc(vdev,
				    &dc2c->wq,
				    DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_devcmd2;

	fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_idx = 0;
	}

	/*
	 * Don't change fetch_index ever and
	 * set posted_index same as fetch_index
	 * when setting up the WQ for devcmd2.
	 */
	vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
	svnic_wq_enable(&dc2c->wq);

	ret = svnic_dev_alloc_desc_ring(vdev,
					&dc2c->results_ring,
					DEVCMD2_RING_SIZE,
					DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_wq;

	dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
	dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
	dc2c->wq_ctrl = dc2c->wq.ctrl;
	vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
	if (ret < 0)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = &_svnic_dev_cmd2;
	pr_info("DEVCMD2 Initialized.\n");

	return ret;

err_free_desc_ring:
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);

err_free_wq:
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);

err_free_devcmd2:
	kfree(dc2c);
	vdev->devcmd2 = NULL;

	return ret;
} /* end of svnic_dev_init_devcmd2 */
static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;

	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = NULL;

	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);
	kfree(dc2c);
}
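
/*
 * Common entry point for issuing a devcmd: the two 64-bit arguments
 * are marshalled through vdev->args[], and whatever the command left
 * in args[0]/args[1] is copied back to the caller.
 */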
int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));
	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}
int svnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, GFP_KERNEL);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
	unsigned int size, void *value)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;
	int err;

	a0 = offset;
	a1 = size;

	err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;

	case 2:
		*(u16 *)value = (u16)a0;
		break;

	case 4:
		*(u32 *)value = (u32)a0;
		break;

	case 8:
		*(u64 *)value = a0;
		break;

	default:
		BUG();
		break;
	}

	return err;
}
int svnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	if (!vdev->stats) {
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
int svnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int svnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err = 0;

	err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	if (err == ERR_ECMDUNKNOWN)
		return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);

	return err;
}

int svnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int svnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = VNIC_DVCMD_TMO;
	int err;

	*done = 0;

	err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
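
/*
 * Register (or, with paddr 0, unregister) the DMA notify buffer with
 * firmware.  The interrupt number rides in the upper half of a1 (see
 * VNIC_NOTIFY_INTR_MASK) and the buffer size in the lower 32 bits.
 */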
int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	if (!vdev->notify) {
		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa, GFP_KERNEL);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK;
	a1 += sizeof(struct vnic_devcmd_notify);

	return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

void svnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = VNIC_DVCMD_TMO;

	a0 = 0;	/* paddr = 0 to unset notify buffer */
	a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
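
/*
 * Snapshot the notify buffer, which firmware may update at any time:
 * words[0] holds a checksum of the remaining words, so keep re-copying
 * until a consistent (untorn) snapshot lands in notify_copy.
 */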
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
int svnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = VNIC_DVCMD_TMO;

	return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

int svnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
void svnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);
		kfree(vdev);
	}
}
struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
					  void *priv,
					  struct pci_dev *pdev,
					  struct vnic_dev_bar *bar,
					  unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	svnic_dev_unregister(vdev);

	return NULL;
} /* end of svnic_dev_alloc_discover */
/*
 * fallback option is left to keep the interface common for other vnics.
 */
int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
{
	int err = -ENODEV;
	void __iomem *p;

	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (p)
		err = svnic_dev_init_devcmd2(vdev);
	else
		pr_err("DEVCMD2 resource not found.\n");

	return err;
} /* end of svnic_dev_cmd_init */