nic.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

#ifndef NIC_H
#define NIC_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "thunder_bgx.h"

/* PCI device IDs */
#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
#define PCI_DEVICE_ID_THUNDER_BGX 0xA026

/* Subsystem device IDs */
#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define PCI_MSIX_REG_BAR_NUM 4

/* NIC SRIOV VF count */
#define MAX_NUM_VFS_SUPPORTED 128
#define DEFAULT_NUM_VF_ENABLED 8

#define NIC_TNS_BYPASS_MODE 0
#define NIC_TNS_MODE 1

/* NIC priv flags */
#define NIC_SRIOV_ENABLED BIT(0)

/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */

/* Max pkinds */
#define NIC_MAX_PKIND 16

/* Max when CPI_ALG is IP diffserv */
#define NIC_MAX_CPI_PER_LMAC 64

/* NIC VF Interrupts */
#define NICVF_INTR_CQ 0
#define NICVF_INTR_SQ 1
#define NICVF_INTR_RBDR 2
#define NICVF_INTR_PKT_DROP 3
#define NICVF_INTR_TCP_TIMER 4
#define NICVF_INTR_MBOX 5
#define NICVF_INTR_QS_ERR 6

#define NICVF_INTR_CQ_SHIFT 0
#define NICVF_INTR_SQ_SHIFT 8
#define NICVF_INTR_RBDR_SHIFT 16
#define NICVF_INTR_PKT_DROP_SHIFT 20
#define NICVF_INTR_TCP_TIMER_SHIFT 21
#define NICVF_INTR_MBOX_SHIFT 22
#define NICVF_INTR_QS_ERR_SHIFT 23

#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
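/* Editorial sketch (not part of the original header): each completion and
 * send queue of a Qset appears to own one bit inside its 8-bit field above,
 * so a per-queue interrupt mask for, e.g., completion queue 'q_idx' would be
 * built as
 *
 *	u64 mask = (1ULL << q_idx) << NICVF_INTR_CQ_SHIFT;
 *
 * while single-instance sources (packet drop, TCP timer, mailbox, Qset error)
 * are covered by their BIT(*_SHIFT) masks directly. The exact helper shape is
 * an assumption for illustration, not a definition from this file.
 */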
/* MSI-X interrupts */
#define NIC_PF_MSIX_VECTORS 10
#define NIC_VF_MSIX_VECTORS 20

#define NIC_PF_INTR_ID_ECC0_SBE 0
#define NIC_PF_INTR_ID_ECC0_DBE 1
#define NIC_PF_INTR_ID_ECC1_SBE 2
#define NIC_PF_INTR_ID_ECC1_DBE 3
#define NIC_PF_INTR_ID_ECC2_SBE 4
#define NIC_PF_INTR_ID_ECC2_DBE 5
#define NIC_PF_INTR_ID_ECC3_SBE 6
#define NIC_PF_INTR_ID_ECC3_DBE 7
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9

/* Minimum FIFO level before all packets for the CQ are dropped
 *
 * This value ensures that once a packet has been "accepted"
 * for reception it will not get dropped due to non-availability
 * of a CQ descriptor. An erratum in HW mandates this value to be
 * at least 0x100.
 */
#define NICPF_CQM_MIN_DROP_LEVEL 0x100

/* Global timer for CQ timer thresh interrupts
 * Calculated for an SCLK of 700MHz
 * The value written should be 1/16th of what is expected
 *
 * 1 tick per 0.025usec
 */
#define NICPF_CLK_PER_INT_TICK 1
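/* Editorial note (worked example, not in the original header): at a 700 MHz
 * SCLK one cycle is ~1.43 ns, so sixteen cycles are ~0.023 usec, i.e. the
 * ~0.025 usec tick quoted above. With illustrative numbers, a CQ timer
 * threshold of 80 ticks would therefore correspond to roughly
 * 80 * 0.025 usec = 2 usec of interrupt coalescing delay; the 80 is an
 * example value, not a constant from this file.
 */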
/* Time to wait before we decide that a SQ is stuck.
 *
 * Since both pkt rx and tx notifications are done with the same CQ,
 * when packets are being received at a very high rate (e.g. L2 forwarding)
 * freeing of transmitted skbs will be delayed and the watchdog
 * will kick in, resetting the interface. Hence this value is kept high.
 */
#define NICVF_TX_TIMEOUT (50 * HZ)

struct nicvf_cq_poll {
	struct nicvf *nicvf;
	u8 cq_idx; /* Completion queue index */
	struct napi_struct napi;
};

#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */

struct nicvf_rss_info {
	bool enable;
#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
#define RSS_IP_HASH_ENA BIT(1)
#define RSS_TCP_HASH_ENA BIT(2)
#define RSS_TCP_SYN_DIS BIT(3)
#define RSS_UDP_HASH_ENA BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
#define RSS_ROCE_ENA BIT(6)
#define RSS_L3_BI_DIRECTION_ENA BIT(7)
#define RSS_L4_BI_DIRECTION_ENA BIT(8)
	u64 cfg;
	u8 hash_bits;
	u16 rss_size;
	u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
	u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
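/* Editorial sketch (assumed usage): the RSS hash computed by hardware indexes
 * the indirection table, whose length is 2^hash_bits (at most
 * NIC_MAX_RSS_IDR_TBL_SIZE), to pick a receive queue:
 *
 *	struct nicvf_rss_info *rss = &nic->rss_info;
 *	u8 rq_idx = rss->ind_tbl[hash & (rss->rss_size - 1)];
 *
 * The 320-bit hash key is carried as five 64-bit words in key[].
 */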
struct nicvf_pfc {
	u8 autoneg;
	u8 fc_rx;
	u8 fc_tx;
};

enum rx_stats_reg_offset {
	RX_OCTS = 0x0,
	RX_UCAST = 0x1,
	RX_BCAST = 0x2,
	RX_MCAST = 0x3,
	RX_RED = 0x4,
	RX_RED_OCTS = 0x5,
	RX_ORUN = 0x6,
	RX_ORUN_OCTS = 0x7,
	RX_FCS = 0x8,
	RX_L2ERR = 0x9,
	RX_DRP_BCAST = 0xa,
	RX_DRP_MCAST = 0xb,
	RX_DRP_L3BCAST = 0xc,
	RX_DRP_L3MCAST = 0xd,
	RX_STATS_ENUM_LAST,
};

enum tx_stats_reg_offset {
	TX_OCTS = 0x0,
	TX_UCAST = 0x1,
	TX_BCAST = 0x2,
	TX_MCAST = 0x3,
	TX_DROP = 0x4,
	TX_STATS_ENUM_LAST,
};

struct nicvf_hw_stats {
	u64 rx_bytes;
	u64 rx_frames;
	u64 rx_ucast_frames;
	u64 rx_bcast_frames;
	u64 rx_mcast_frames;
	u64 rx_drops;
	u64 rx_drop_red;
	u64 rx_drop_red_bytes;
	u64 rx_drop_overrun;
	u64 rx_drop_overrun_bytes;
	u64 rx_drop_bcast;
	u64 rx_drop_mcast;
	u64 rx_drop_l3_bcast;
	u64 rx_drop_l3_mcast;
	u64 rx_fcs_errors;
	u64 rx_l2_errors;

	u64 tx_bytes;
	u64 tx_frames;
	u64 tx_ucast_frames;
	u64 tx_bcast_frames;
	u64 tx_mcast_frames;
	u64 tx_drops;
};

struct nicvf_drv_stats {
	/* CQE Rx errs */
	u64 rx_bgx_truncated_pkts;
	u64 rx_jabber_errs;
	u64 rx_fcs_errs;
	u64 rx_bgx_errs;
	u64 rx_prel2_errs;
	u64 rx_l2_hdr_malformed;
	u64 rx_oversize;
	u64 rx_undersize;
	u64 rx_l2_len_mismatch;
	u64 rx_l2_pclp;
	u64 rx_ip_ver_errs;
	u64 rx_ip_csum_errs;
	u64 rx_ip_hdr_malformed;
	u64 rx_ip_payload_malformed;
	u64 rx_ip_ttl_errs;
	u64 rx_l3_pclp;
	u64 rx_l4_malformed;
	u64 rx_l4_csum_errs;
	u64 rx_udp_len_errs;
	u64 rx_l4_port_errs;
	u64 rx_tcp_flag_errs;
	u64 rx_tcp_offset_errs;
	u64 rx_l4_pclp;
	u64 rx_truncated_pkts;

	/* CQE Tx errs */
	u64 tx_desc_fault;
	u64 tx_hdr_cons_err;
	u64 tx_subdesc_err;
	u64 tx_max_size_exceeded;
	u64 tx_imm_size_oflow;
	u64 tx_data_seq_err;
	u64 tx_mem_seq_err;
	u64 tx_lock_viol;
	u64 tx_data_fault;
	u64 tx_tstmp_conflict;
	u64 tx_tstmp_timeout;
	u64 tx_mem_fault;
	u64 tx_csum_overlap;
	u64 tx_csum_overflow;

	/* driver debug stats */
	u64 tx_tso;
	u64 tx_timeout;
	u64 txq_stop;
	u64 txq_wake;

	u64 rcv_buffer_alloc_failures;
	u64 page_alloc;

	struct u64_stats_sync syncp;
};
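/* Editorial sketch (assumed usage): drv_stats is instantiated per CPU (see
 * the __percpu pointer in struct nicvf below), so hot-path updates can be
 * lock-free and totals are summed at read time, e.g.:
 *
 *	this_cpu_inc(nic->drv_stats->tx_tso);
 *
 *	u64 total = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(nic->drv_stats, cpu)->tx_tso;
 *
 * The syncp member allows 64-bit counters to be read consistently on 32-bit
 * architectures via u64_stats_update_begin()/u64_stats_fetch_begin().
 */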
struct cavium_ptp;

struct xcast_addr_list {
	int count;
	u64 mc[];
};

struct nicvf_work {
	struct work_struct work;
	u8 mode;
	struct xcast_addr_list *mc;
};
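/* Editorial sketch (assumed usage in the ndo_set_rx_mode() path; everything
 * beyond the structures above is standard kernel API): the flexible mc[]
 * array would typically be sized for the device's current multicast list and
 * handed to rx_mode_work for deferred processing:
 *
 *	struct xcast_addr_list *mc_list;
 *	struct netdev_hw_addr *ha;
 *
 *	mc_list = kmalloc(struct_size(mc_list, mc, netdev_mc_count(netdev)),
 *			  GFP_ATOMIC);
 *	if (!mc_list)
 *		return;
 *	mc_list->count = 0;
 *	netdev_for_each_mc_addr(ha, netdev)
 *		mc_list->mc[mc_list->count++] = ether_addr_to_u64(ha->addr);
 */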
struct nicvf {
	struct nicvf *pnicvf;
	struct net_device *netdev;
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct bpf_prog *xdp_prog;
#define MAX_QUEUES_PER_QSET 8
	struct queue_set *qs;
	void *iommu_domain;
	u8 vf_id;
	u8 sqs_id;
	bool sqs_mode;
	bool hw_tso;
	bool t88;

	/* Receive buffer alloc */
	u32 rb_page_offset;
	u16 rb_pageref;
	bool rb_alloc_fail;
	bool rb_work_scheduled;
	struct page *rb_page;
	struct delayed_work rbdr_work;
	struct tasklet_struct rbdr_task;

	/* Secondary Qset */
	u8 sqs_count;
#define MAX_SQS_PER_VF_SINGLE_NODE 5
#define MAX_SQS_PER_VF 11
	struct nicvf *snicvf[MAX_SQS_PER_VF];

	/* Queue count */
	u8 rx_queues;
	u8 tx_queues;
	u8 xdp_tx_queues;
	u8 max_queues;

	u8 node;
	u8 cpi_alg;
	bool link_up;
	u8 mac_type;
	u8 duplex;
	u32 speed;
	bool tns_mode;
	bool loopback_supported;
	struct nicvf_rss_info rss_info;
	struct nicvf_pfc pfc;
	struct tasklet_struct qs_err_task;
	struct work_struct reset_task;
	struct nicvf_work rx_mode_work;
	/* spinlock to protect workqueue arguments from concurrent access */
	spinlock_t rx_mode_wq_lock;
	/* workqueue for handling kernel ndo_set_rx_mode() calls */
	struct workqueue_struct *nicvf_rx_mode_wq;
	/* mutex to protect VF's mailbox contents from concurrent access */
	struct mutex rx_mode_mtx;
	struct delayed_work link_change_work;

	/* PTP timestamp */
	struct cavium_ptp *ptp_clock;
	/* Inbound timestamping is on */
	bool hw_rx_tstamp;
	/* When a packet that requires timestamping is sent, hardware inserts
	 * two entries into the completion queue. The first is the regular
	 * CQE_TYPE_SEND entry that signals that the packet was sent.
	 * The second is CQE_TYPE_SEND_PTP, which contains the actual timestamp
	 * for that packet.
	 * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
	 * entry and is used and zeroed in the handler for the CQE_TYPE_SEND_PTP
	 * entry.
	 * So `ptp_skb` holds the pointer to the packet between the
	 * CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
	 */
	struct sk_buff *ptp_skb;
	/* `tx_ptp_skbs` is set when the hardware is sending a packet that
	 * requires timestamping. Cavium hardware cannot process more than one
	 * such packet at once, so this is set each time the driver submits
	 * a packet that requires timestamping to the send queue and cleared
	 * each time it receives the completion queue entry saying that such
	 * a packet was sent.
	 * So `tx_ptp_skbs` prevents the driver from submitting more than one
	 * packet that requires timestamping to the hardware at a time.
	 * (A sketch of this handshake follows the structure definition.)
	 */
	atomic_t tx_ptp_skbs;

	/* Interrupt coalescing settings */
	u32 cq_coalesce_usecs;
	u32 msg_enable;

	/* Stats */
	struct nicvf_hw_stats hw_stats;
	struct nicvf_drv_stats __percpu *drv_stats;
	struct bgx_stats bgx_stats;

	/* Napi */
	struct nicvf_cq_poll *napi[8];

	/* MSI-X */
	u8 num_vec;
	char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
	bool irq_allocated[NIC_VF_MSIX_VECTORS];
	cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];

	/* VF <-> PF mailbox communication */
	bool pf_acked;
	bool pf_nacked;
	bool set_mac_pending;
} ____cacheline_aligned_in_smp;
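/* Editorial sketch of the Tx timestamping handshake described above (assumed
 * control flow; the timestamp variable is hypothetical):
 *
 *	// xmit path: admit at most one PTP-stamped packet at a time
 *	if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
 *		; // slot busy: send the packet without a HW timestamp
 *	else
 *		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 *
 *	// CQE_TYPE_SEND handler: park the skb until its timestamp arrives
 *	nic->ptp_skb = skb;
 *
 *	// CQE_TYPE_SEND_PTP handler: deliver the stamp and free the slot
 *	struct skb_shared_hwtstamps ts = { .hwtstamp = ns_to_ktime(tstamp_ns) };
 *
 *	skb_tstamp_tx(nic->ptp_skb, &ts);
 *	dev_kfree_skb_any(nic->ptp_skb);
 *	nic->ptp_skb = NULL;
 *	atomic_dec(&nic->pnicvf->tx_ptp_skbs);
 */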
/* PF <--> VF Mailbox communication
 * Eight 64bit registers are shared between PF and VF.
 * A separate set exists for each VF.
 * Writing '1' into the last register, mbx7, marks the end of the message.
 */

/* PF <--> VF mailbox communication */
#define NIC_PF_VF_MAILBOX_SIZE 2
#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
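/* Editorial sketch (assumed shape of nicvf_send_msg_to_pf(), which is
 * declared at the end of this header): the VF copies the 128-bit message
 * into its mailbox registers and then polls the pf_acked/pf_nacked flags,
 * which the mailbox interrupt handler sets when the PF answers:
 *
 *	int timeout = NIC_MBOX_MSG_TIMEOUT, sleep = 10;
 *
 *	nic->pf_acked = false;
 *	nic->pf_nacked = false;
 *	// write the two 64-bit words of *mbx to the VF<->PF mailbox registers
 *	while (!nic->pf_acked) {
 *		if (nic->pf_nacked)
 *			return -EINVAL;
 *		msleep(sleep);
 *		timeout -= sleep;
 *		if (!timeout)
 *			return -EBUSY;
 *	}
 *	return 0;
 */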
/* Mailbox message types */
#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send secondary Qset nicvf ptr to primary VF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */
#define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
#define NIC_MBOX_MSG_RESET_XCAST 0xF2 /* Reset DCAM filtering mode */
#define NIC_MBOX_MSG_ADD_MCAST 0xF3 /* Add MAC to DCAM filters */
#define NIC_MBOX_MSG_SET_XCAST 0xF4 /* Set MCAST/BCAST RX mode */

struct nic_cfg_msg {
	u8 msg;
	u8 vf_id;
	u8 node_id;
	u8 tns_mode:1;
	u8 sqs_mode:1;
	u8 loopback_supported:1;
	u8 mac_addr[ETH_ALEN];
};

/* Qset configuration */
struct qs_cfg_msg {
	u8 msg;
	u8 num;
	u8 sqs_count;
	u64 cfg;
};

/* Receive queue configuration */
struct rq_cfg_msg {
	u8 msg;
	u8 qs_num;
	u8 rq_num;
	u64 cfg;
};

/* Send queue configuration */
struct sq_cfg_msg {
	u8 msg;
	u8 qs_num;
	u8 sq_num;
	bool sqs_mode;
	u64 cfg;
};

/* Set VF's MAC address */
struct set_mac_msg {
	u8 msg;
	u8 vf_id;
	u8 mac_addr[ETH_ALEN];
};

/* Set Maximum frame size */
struct set_frs_msg {
	u8 msg;
	u8 vf_id;
	u16 max_frs;
};

/* Set CPI algorithm type */
struct cpi_cfg_msg {
	u8 msg;
	u8 vf_id;
	u8 rq_cnt;
	u8 cpi_alg;
};

/* Get RSS table size */
struct rss_sz_msg {
	u8 msg;
	u8 vf_id;
	u16 ind_tbl_size;
};

/* Set RSS configuration */
struct rss_cfg_msg {
	u8 msg;
	u8 vf_id;
	u8 hash_bits;
	u8 tbl_len;
	u8 tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
	u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};
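/* Editorial sketch (assumed usage): because one mailbox message carries at
 * most RSS_IND_TBL_LEN_PER_MBX_MSG indirection-table entries, a full table is
 * sent in chunks, the first as NIC_MBOX_MSG_RSS_CFG and the rest as
 * NIC_MBOX_MSG_RSS_CFG_CONT with a running tbl_offset:
 *
 *	union nic_mbx mbx = {};
 *	int len = rss->rss_size, next = 0, i;
 *
 *	while (len) {
 *		mbx.rss_cfg.tbl_offset = next;
 *		mbx.rss_cfg.tbl_len = min(len, RSS_IND_TBL_LEN_PER_MBX_MSG);
 *		mbx.rss_cfg.msg = next ? NIC_MBOX_MSG_RSS_CFG_CONT :
 *					 NIC_MBOX_MSG_RSS_CFG;
 *		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
 *			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[next++];
 *		nicvf_send_msg_to_pf(nic, &mbx);
 *		len -= mbx.rss_cfg.tbl_len;
 *	}
 */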
struct bgx_stats_msg {
	u8 msg;
	u8 vf_id;
	u8 rx;
	u8 idx;
	u64 stats;
};

/* Physical interface link status */
struct bgx_link_status {
	u8 msg;
	u8 mac_type;
	u8 link_up;
	u8 duplex;
	u32 speed;
};

/* Get Extra Qset IDs */
struct sqs_alloc {
	u8 msg;
	u8 vf_id;
	u8 qs_count;
};

struct nicvf_ptr {
	u8 msg;
	u8 vf_id;
	bool sqs_mode;
	u8 sqs_id;
	u64 nicvf;
};

/* Set interface in loopback mode */
struct set_loopback {
	u8 msg;
	u8 vf_id;
	bool enable;
};

/* Reset statistics counters */
struct reset_stat_cfg {
	u8 msg;
	/* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
	u16 rx_stat_mask;
	/* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
	u8 tx_stat_mask;
	/* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
	 * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
	 * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
	 * ..
	 * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
	 * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
	 */
	u16 rq_stat_mask;
	/* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
	 * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
	 * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
	 * ..
	 * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
	 * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
	 */
	u16 sq_stat_mask;
};
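/* Editorial sketch (illustrative values): following the bit layouts above, a
 * request that clears every interface-level counter plus both statistics of
 * receive/send queue 'q' could be filled in as:
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
 *	mbx.reset_stat.rx_stat_mask = 0x3FFF;		// RX_STAT 0..13
 *	mbx.reset_stat.tx_stat_mask = 0x1F;		// TX_STAT 0..4
 *	mbx.reset_stat.rq_stat_mask = 0x3 << (2 * q);	// RQq_STAT(0..1)
 *	mbx.reset_stat.sq_stat_mask = 0x3 << (2 * q);	// SQq_STAT(0..1)
 *	nicvf_send_msg_to_pf(nic, &mbx);
 */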
struct pfc {
	u8 msg;
	u8 get; /* Get or set PFC settings */
	u8 autoneg;
	u8 fc_rx;
	u8 fc_tx;
};

struct set_ptp {
	u8 msg;
	bool enable;
};

struct xcast {
	u8 msg;
	u8 mode;
	u64 mac:48;
};

/* 128 bit shared memory between PF and each VF */
union nic_mbx {
	struct { u8 msg; } msg;
	struct nic_cfg_msg nic_cfg;
	struct qs_cfg_msg qs;
	struct rq_cfg_msg rq;
	struct sq_cfg_msg sq;
	struct set_mac_msg mac;
	struct set_frs_msg frs;
	struct cpi_cfg_msg cpi_cfg;
	struct rss_sz_msg rss_size;
	struct rss_cfg_msg rss_cfg;
	struct bgx_stats_msg bgx_stats;
	struct bgx_link_status link_status;
	struct sqs_alloc sqs_alloc;
	struct nicvf_ptr nicvf;
	struct set_loopback lbk;
	struct reset_stat_cfg reset_stat;
	struct pfc pfc;
	struct set_ptp ptp;
	struct xcast xcast;
};
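/* Editorial sketch (assumed usage): every message starts with its u8 type in
 * the first byte, so a caller fills the matching union member and hands the
 * 128-bit block to nicvf_send_msg_to_pf(), e.g. to program a MAC address:
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
 *	mbx.mac.vf_id = nic->vf_id;
 *	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
 *	return nicvf_send_msg_to_pf(nic, &mbx);
 */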
#define NIC_NODE_ID_MASK 0x03
#define NIC_NODE_ID_SHIFT 44

static inline int nic_get_node_id(struct pci_dev *pdev)
{
	u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);

	return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}

static inline bool pass1_silicon(struct pci_dev *pdev)
{
	return (pdev->revision < 8) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

static inline bool pass2_silicon(struct pci_dev *pdev)
{
	return (pdev->revision >= 8) &&
		(pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
int nicvf_stop(struct net_device *netdev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_config_rss(struct nicvf *nic);
void nicvf_set_rss_key(struct nicvf *nic);
void nicvf_set_ethtool_ops(struct net_device *netdev);
void nicvf_update_stats(struct nicvf *nic);
void nicvf_update_lmac_stats(struct nicvf *nic);

#endif /* NIC_H */