// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"

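/*
 * State for the devcmd2 (ring-based) firmware command interface: the
 * work queue that commands are posted to, the DMA ring where firmware
 * writes results, and the color bit used to detect when a new result
 * has been written.
 */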
struct devcmd2_controller {
	struct vnic_wq_ctrl *wq_ctrl;
	struct vnic_dev_ring results_ring;
	struct vnic_wq wq;
	struct vnic_devcmd2 *cmd_ring;
	struct devcmd2_result *result;
	u16 next_result;
	u16 result_size;
	int color;
};

enum vnic_proxy_type {
	PROXY_NONE,
	PROXY_BY_BDF,
	PROXY_BY_INDEX,
};

struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
	enum vnic_proxy_type proxy;
	u32 proxy_index;
	u64 args[VNIC_DEVCMD_NARGS];
	struct devcmd2_controller *devcmd2;

	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			int wait);
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

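/*
 * Walk the resource table that firmware exposes at the start of BAR0:
 * validate the header magic/version, then record the BAR0 address and
 * count of each recognized resource type in vdev->res[].
 */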
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)	/* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len, bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD2:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);
	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

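/*
 * Allocate a coherent DMA descriptor ring.  The buffer is over-allocated
 * by base_align bytes so the ring base can be rounded up to the 512-byte
 * alignment the hardware requires (see vnic_dev_desc_ring_size above).
 */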
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
		ring->size_unaligned,
		&ring->base_addr_unaligned, GFP_KERNEL);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

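/*
 * Issue a command through the legacy devcmd register window: write the
 * args, write the command, then poll the status register (100 us per
 * iteration, up to "wait" iterations) until STAT_BUSY clears.
 */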
static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	static const int dev_cmd_err[] = {
		/* convert from fw's version of error.h to host's version */
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
		EFAULT,	/* ERR_EFAULT */
		EPERM,	/* ERR_EPERM */
		EBUSY,	/* ERR_EBUSY */
	};
	int err;
	u64 *a0 = &vdev->args[0];
	u64 *a1 = &vdev->args[1];

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = dev_cmd_err[(int)readq(&devcmd->args[0])];
				printk(KERN_ERR "Error %d devcmd %d\n",
					err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

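/*
 * Issue a command through the devcmd2 work queue: post a descriptor at
 * posted_index, then poll the corresponding results-ring entry until
 * firmware flips its color bit.  NOWAIT commands return immediately
 * after posting.
 */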
static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay;
	int err;
	u32 fetch_index;
	u32 posted;
	u32 new_posted;

	posted = ioread32(&dc2c->wq_ctrl->posted_index);
	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);

	if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) {
		/* Hardware surprise removal: return error */
		pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n",
			pci_name(vdev->pdev), _CMD_N(cmd));
		pr_err("%s: fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), fetch_index, posted);

		return -ENODEV;
	}

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		pr_err("%s: devcmd2 wq full while issuing cmd %d\n",
			pci_name(vdev->pdev), _CMD_N(cmd));
		pr_err("%s: fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), fetch_index, posted);

		return -EBUSY;
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		if (result->color == color) {
			if (result->error) {
				err = -(int) result->error;
				if (err != ERR_ECMDUNKNOWN ||
						cmd != CMD_CAPABILITY)
					pr_err("%s: Error %d devcmd %d\n",
						pci_name(vdev->pdev),
						err, _CMD_N(cmd));
				return err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb(); /* prevent reorder while reading result */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}

			return 0;
		}
	}

	pr_err("%s: Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd));

	return -ETIMEDOUT;
}

static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;

	vdev->devcmd_rtn = &vnic_dev_cmd1;
	return 0;
}

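/*
 * Set up the devcmd2 interface: allocate the command WQ and results
 * ring, point firmware at the results ring via CMD_INITIALIZE_DEVCMD2
 * (issued through devcmd2 itself), and switch devcmd_rtn over to
 * vnic_dev_cmd2.
 */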
static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	if (vdev->devcmd2)
		return 0;

	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC);
	if (!vdev->devcmd2)
		return -ENOMEM;

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;

	err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq,
			DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		pr_err("error in devcmd2 init\n");
		err = -ENODEV;
		goto err_free_wq;
	}

	/*
	 * Don't change fetch_index ever and
	 * set posted_index same as fetch_index
	 * when setting up the WQ for devcmd2.
	 */
	vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index,
			fetch_index, 0, 0);

	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
			DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_disable_wq;

	vdev->devcmd2->result =
		(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
	vdev->devcmd2->cmd_ring =
		(struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs;
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr |
				VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	err = vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = &vnic_dev_cmd2;

	return 0;

err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;

	return err;
}

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = &vnic_dev_cmd1;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = (*vdev->devcmd_rtn)(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa, GFP_KERNEL);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a[2] = {};
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a)[i];

	return 0;
}

void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't set packet filter\n");
}

void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a[2] = {};
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
	if (err)
		pr_err("Can't add addr [%pM], %d\n", addr, err);
}

void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a[2] = {};
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
	if (err)
		pr_err("Can't del addr [%pM], %d\n", addr, err);
}

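/*
 * Register the notify buffer with firmware.  a1 packs the interrupt
 * index into bits 47:32 and the notify buffer size into the low 32
 * bits; vnic_dev_notify_unset below passes all-ones in the interrupt
 * field (intr num = -1) to unregister.
 */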
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa, GFP_KERNEL);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0;	/* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

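/*
 * Snapshot the firmware-updated notify block.  Word 0 of the block
 * holds the sum of all the other words; re-copy until the checksum
 * matches so a torn (mid-update) read is never returned.
 */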
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
{
	u64 a0 = new_default_vlan, a1 = 0;
	int wait = 1000;
	int old_vlan = 0;

	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
	return (u16)old_vlan;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);
		kfree(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}

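/*
 * Pick the command transport at probe time: prefer the devcmd2 ring if
 * the DEVCMD2 BAR resource exists, otherwise fall back to the legacy
 * devcmd register window.
 */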
int vnic_dev_cmd_init(struct vnic_dev *vdev)
{
	int err;
	void *p;

	p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (p) {
		pr_err("fnic: DEVCMD2 resource found!\n");
		err = vnic_dev_init_devcmd2(vdev);
	} else {
		pr_err("fnic: DEVCMD2 not found, fall back to Devcmd\n");
		err = vnic_dev_init_devcmd1(vdev);
	}

	return err;
}