// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "vf.h"
#include "ixgbevf.h"

/* On Hyper-V, to reset, we need to read from this offset
 * from the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
#define IXGBE_HV_RESET_OFFSET	0x201

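/**
 * ixgbevf_write_msg_read_ack - Send a mailbox message and poll for the reply
 * @hw: pointer to the HW structure
 * @msg: message buffer to send to the PF
 * @retmsg: buffer that receives the PF's reply
 * @size: length of both buffers in 32-bit words
 *
 * Returns 0 on success, or a mailbox error code if the write or the poll
 * for the reply fails.
 */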
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
					     u32 *retmsg, u16 size)
{
	s32 retval = ixgbevf_write_mbx(hw, msg, size);

	if (retval)
		return retval;

	return ixgbevf_poll_mbx(hw, retmsg, size);
}

/**
 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}

/**
 * ixgbevf_init_hw_vf - virtual function hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware and then starting
 * the hardware
 **/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
	s32 status = hw->mac.ops.start_hw(hw);

	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

	return status;
}

/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;
	hw->mbx.ops.init_params(hw);
	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
	       sizeof(struct ixgbe_mbx_operations));

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	ixgbevf_write_mbx(hw, msgbuf, 1);

	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}

/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}

/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}

/**
 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits from a multicast address that determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits from
 * incoming Rx multicast addresses to determine the bit-vector to check in
 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

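	/* Example: with filter type 0, the multicast address 01:00:5e:00:00:fb
	 * (mc_addr[4] = 0x00, mc_addr[5] = 0xfb) hashes to vector 0xfb0.
	 */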
	switch (hw->mac.mc_filter_type) {
	case 0:		/* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:		/* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:		/* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:		/* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:	/* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 * ixgbevf_get_mac_addr_vf - Read device MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to storage for retrieved MAC address
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}

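/**
 * ixgbevf_set_uc_addr_vf - Set or clear a unicast MAC (macvlan) filter
 * @hw: pointer to the HW structure
 * @index: index of the entry in the VF's filter list; zero asks the PF to
 *	   clear all of this VF's macvlans
 * @addr: MAC address to program, or NULL
 *
 * Returns 0 on success, -ENOMEM if the PF rejected the request, or a
 * mailbox error code.
 */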
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	u32 msgbuf[3], msgbuf_chk;
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	/* If index is one then this is the start of a new list and needs
	 * indication to the PF so it can do its own list management.
	 * If it is zero then that tells the PF to just clear all of
	 * this VF's macvlans and there is no new list.
	 */
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
	msgbuf_chk = msgbuf[0];

	if (addr)
		ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (!ret_val) {
		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
			return -ENOMEM;
	}

	return ret_val;
}

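/**
 * ixgbevf_hv_set_uc_addr_vf - stub
 * @hw: unused
 * @index: unused
 * @addr: unused
 *
 * Hyper-V variant - just a stub.
 */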
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success, or -EOPNOTSUPP if the API doesn't support this
 * operation.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_15:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = ixgbevf_write_mbx(hw, msgbuf, 1);
	if (err)
		return err;

	err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

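	/* each DWORD returned by the PF packs 16 two-bit RETA entries */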
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}

/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success, or -EOPNOTSUPP if the API doesn't support this
 * operation.
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if API doesn't support RSS Random Key retrieval
	 * or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_15:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;

	err = ixgbevf_write_mbx(hw, msgbuf, 1);
	if (err)
		return err;

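	/* reply is one message-type word followed by the 10-word RSS key */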
	err = ixgbevf_poll_mbx(hw, msgbuf, 11);
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}

/**
 * ixgbevf_set_rar_vf - set device MAC address
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
			      u32 vmdq)
{
	u32 msgbuf[3];
	u8 *msg_addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if nacked the address was rejected, use "perm_addr" */
	if (!ret_val &&
	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
		return IXGBE_ERR_MBX;
	}

	return ret_val;
}

/**
 * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 *
 * We don't really allow setting the device MAC address. However,
 * if the address being set is the permanent MAC address we will
 * permit that.
 **/
static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
				 u32 vmdq)
{
	if (ether_addr_equal(addr, hw->mac.perm_addr))
		return 0;

	return -EOPNOTSUPP;
}

/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word. We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type). That's 30 hash values if we pack 'em right. If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multicast
	 * addresses except for in large enterprise network environments.
	 */
	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					  IXGBE_VFMAILBOX_SIZE);
}

/**
 * ixgbevf_hv_update_mc_addr_list_vf - stub
 * @hw: unused
 * @netdev: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}

/**
 * ixgbevf_update_xcast_mode - Update Multicast mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Updates the Multicast Mode of VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		fallthrough;
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_15:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	return 0;
}

/**
 * ixgbevf_hv_update_xcast_mode - stub
 * @hw: unused
 * @xcast_mode: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_link_state_vf - Get VF link state from PF
 * @hw: pointer to the HW structure
 * @link_state: link state storage
 *
 * Returns 0 on success, or IXGBE_ERR_MBX on mailbox failure.
 */
static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
{
	u32 msgbuf[2];
	s32 ret_val;
	s32 err;

	msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
	msgbuf[1] = 0x0;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);

	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
		ret_val = IXGBE_ERR_MBX;
	} else {
		ret_val = 0;
		*link_state = msgbuf[1];
	}

	return ret_val;
}

/**
 * ixgbevf_hv_get_link_state_vf - stub
 * @hw: unused
 * @link_state: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
{
	return -EOPNOTSUPP;
}

/**
 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 * @hw: pointer to the HW structure
 * @vlan: 12 bit VLAN ID
 * @vind: unused by VF drivers
 * @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			       bool vlan_on)
{
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		goto mbx_err;

	/* remove extra bits from the message */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
		err = IXGBE_ERR_INVALID_ARGUMENT;

mbx_err:
	return err;
}

/**
 * ixgbevf_hv_set_vfta_vf - stub
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}

/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: Unused in this implementation
 * @autoneg: Unused in this implementation
 * @autoneg_wait_to_complete: Unused in this implementation
 *
 * Do nothing and return success. VF drivers are not allowed to change
 * global settings. Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}

/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1)) {
		if (hw->api_version >= ixgbe_mbox_api_15)
			mac->get_link_status = false;
		goto out;
	}

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is a NACK; we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}

/**
 * ixgbevf_hv_check_mac_link_vf - check link
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}

/**
 * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 msgbuf[2];
	s32 ret_val;

	msgbuf[0] = IXGBE_VF_SET_LPE;
	msgbuf[1] = max_size;

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (ret_val)
		return ret_val;
	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
	    (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
		return IXGBE_ERR_MBX;

	return 0;
}

/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 * Hyper-V variant.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* CRC is 4 bytes; include it in the max frame length */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}

/**
 * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 **/
static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	int err;
	u32 msg[3];

	/* Negotiate the mailbox API version */
	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = api;
	msg[2] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* Store value and return 0 on success */
		if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
			       IXGBE_VT_MSGTYPE_SUCCESS)) {
			hw->api_version = api;
			return 0;
		}

		err = IXGBE_ERR_INVALID_ARGUMENT;
	}

	return err;
}

/**
 * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 * Hyper-V version - only ixgbe_mbox_api_10 supported.
 **/
static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
	if (api != ixgbe_mbox_api_10)
		return IXGBE_ERR_INVALID_ARGUMENT;

	return 0;
}

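/**
 * ixgbevf_get_queues - Fetch queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: number of traffic classes reported by the PF
 * @default_tc: default queue/traffic class reported by the PF
 *
 * Also caches the Tx/Rx queue limits reported by the PF in @hw->mac.
 * Returns 0 on success, or a mailbox error code otherwise.
 */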
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_15:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}

static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_check_mac_link_vf,
	.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
	.set_rar = ixgbevf_set_rar_vf,
	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode = ixgbevf_update_xcast_mode,
	.get_link_state = ixgbevf_get_link_state_vf,
	.set_uc_addr = ixgbevf_set_uc_addr_vf,
	.set_vfta = ixgbevf_set_vfta_vf,
	.set_rlpml = ixgbevf_set_rlpml_vf,
};

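/* Hyper-V hosted VFs cannot use the PF/VF mailbox, so these variants either
 * use alternate mechanisms (PCI config space, direct register writes) or are
 * stubs that return -EOPNOTSUPP.
 */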
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_hv_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
	.set_rar = ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode = ixgbevf_hv_update_xcast_mode,
	.get_link_state = ixgbevf_hv_get_link_state_vf,
	.set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
	.set_vfta = ixgbevf_hv_set_vfta_vf,
	.set_rlpml = ixgbevf_hv_set_rlpml_vf,
};

const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};