/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

#define PCI_DEV_ID_GVNIC 0x0042

#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD 20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u8 can_flip;
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
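
/* Illustrative sketch (not part of the driver) of how a completion queue
 * driven by a generation bit is typically polled, assuming the completion
 * descriptor exposes a `generation` field:
 *
 *	desc = &complq->desc_ring[complq->head];
 *	if (READ_ONCE(desc->generation) == complq->cur_gen_bit)
 *		return;				// nothing new from HW
 *	// ... process the completion ...
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1;	// flip expectation on wrap
 */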
/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
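
/* Illustrative sketch (not part of the driver): popping the head of one of
 * these index-based lists, assuming `states` is the array the indices refer
 * to (e.g. buf_states below):
 *
 *	s16 idx = list->head;
 *	if (idx == -1)
 *		return NULL;		// list is empty
 *	list->head = states[idx].next;
 *	if (list->head == -1)
 *		list->tail = -1;	// list became empty
 *	return &states[idx];
 */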
/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u16 total_expected_size;
	u8 expected_frag_cnt;
	u8 curr_frag_cnt;
	u8 reuse_frags;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};
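
/* Illustrative sketch (not part of the driver): in raw addressing mode the
 * unmap fields above would be used with the kernel's dma_unmap_* helpers,
 * e.g. (names other than the helpers are hypothetical):
 *
 *	dma_unmap_addr_set(info, dma, addr);
 *	dma_unmap_len_set(info, len, skb_headlen(skb));
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(info, dma),
 *			 dma_unmap_len(info, len), DMA_TO_DEVICE);
 */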
/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
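
/* Illustrative sketch (not part of the driver) of how space in such a FIFO
 * is typically claimed: advance `head` modulo `size` and debit `available`,
 * crediting it back once the NIC has consumed the bytes:
 *
 *	if (atomic_read(&fifo->available) < bytes)
 *		return -ENOSPC;			// no room right now
 *	offset = fifo->head;
 *	fifo->head = (fifo->head + bytes) % fifo->size;
 *	atomic_sub(bytes, &fifo->available);
 */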
/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};
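
/* Typical lifecycle implied by the states above (illustrative summary):
 *
 *	UNALLOCATED -> PENDING_DATA_COMPL -> freed      (normal data completion)
 *	PENDING_DATA_COMPL -> PENDING_REINJECT_COMPL    (miss completion)
 *	PENDING_REINJECT_COMPL -> freed                 (re-injection completion)
 *	PENDING_REINJECT_COMPL -> TIMED_OUT_COMPL       (timeout_jiffies exceeded)
 */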
struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets); see the
			 * illustrative sketch after this struct.
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;

			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
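
/* Illustrative sketch (not part of the driver) of the list-stealing scheme
 * described for dqo_tx.free_pending_packets above: when the TX path's
 * consumer list is empty, it atomically takes over whatever list the
 * completion path has accumulated:
 *
 *	s16 idx = tx->dqo_tx.free_pending_packets;
 *	if (idx == -1)
 *		idx = atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
 *	// idx is now the head of a private free list (or still -1 if both
 *	// lists were empty)
 */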
/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
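
/* Illustrative sketch (not part of the driver): the 10-bit packet type
 * reported by the device is used directly as an index into the LUT,
 * assuming `ptype` was taken from an RX completion descriptor:
 *
 *	const struct gve_ptype *pt = &priv->ptype_lut_dqo->ptypes[ptype];
 *	// pt->l3_type / pt->l4_type then map to the enums in gve_adminq.h
 */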
/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
	GVE_GQI_RDA_FORMAT = 0x1,
	GVE_GQI_QPL_FORMAT = 0x2,
	GVE_DQO_RDA_FORMAT = 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of per AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of reset */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};
enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET = 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS = 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
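
/* Illustrative use (not part of this header), assuming `val` is an irq
 * ack/mask value understood by the device:
 *
 *	iowrite32be(val, gve_irq_doorbell(priv, block));
 */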
/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
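
/* Worked example of the mapping above: with num_ntfy_blks == 8, TX queue 2
 * uses notify block 2 while RX queue 2 uses notify block 8 / 2 + 2 = 6, i.e.
 * TX queues occupy the first half of the blocks and RX queues the second.
 */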
/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}
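
/* Example of the resulting QPL id layout: with 4 TX and 4 RX queues in
 * GVE_GQI_QPL_FORMAT, ids 0-3 are handed out to TX queues and ids 4-7 to RX
 * queues; gve_unassign_qpl() below simply clears the id's bit again.
 */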
/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */