/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _ENIC_H_
#define _ENIC_H_

#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>

#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"

#define ENIC_BARS_MAX 6

#define ENIC_WQ_MAX 8
#define ENIC_RQ_MAX 8
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
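/* One interrupt per completion queue, plus one each for the error and
 * notify interrupts (see enic_msix_err_intr()/enic_msix_notify_intr()).
 */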
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)

#define ENIC_WQ_NAPI_BUDGET 256

#define ENIC_AIC_LARGE_PKT_DIFF 3

struct enic_msix_entry {
        int requested;
        char devname[IFNAMSIZ + 8];
        irqreturn_t (*isr)(int, void *);
        void *devid;
        cpumask_var_t affinity_mask;
};

/* Store only the lower range. Higher range is given by fw. */
struct enic_intr_mod_range {
        u32 small_pkt_range_start;
        u32 large_pkt_range_start;
};

struct enic_intr_mod_table {
        u32 rx_rate;
        u32 range_percent;
};

#define ENIC_MAX_LINK_SPEEDS 3
#define ENIC_LINK_SPEED_10G 10000
#define ENIC_LINK_SPEED_4G 4000
#define ENIC_LINK_40G_INDEX 2
#define ENIC_LINK_10G_INDEX 1
#define ENIC_LINK_4G_INDEX 0
#define ENIC_RX_COALESCE_RANGE_END 125
#define ENIC_AIC_TS_BREAK 100
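/* Adaptive RX coalescing state: range start values for small and large
 * packets, the common range end, and a flag indicating whether adaptive
 * RX coalescing is enabled.
 */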
struct enic_rx_coal {
        u32 small_pkt_range_start;
        u32 large_pkt_range_start;
        u32 range_end;
        u32 use_adaptive_rx_coalesce;
};

/* priv_flags */
#define ENIC_SRIOV_ENABLED (1 << 0)

/* enic port profile set flags */
#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
#define ENIC_SET_REQUEST (1 << 1)
#define ENIC_SET_NAME (1 << 2)
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)

struct enic_port_profile {
        u32 set;
        u8 request;
        char name[PORT_PROFILE_MAX];
        u8 instance_uuid[PORT_UUID_MAX];
        u8 host_uuid[PORT_UUID_MAX];
        u8 vf_mac[ETH_ALEN];
        u8 mac_addr[ETH_ALEN];
};
/* enic_rfs_fltr_node - rfs filter node in hash table
 * @keys: IPv4 5 tuple
 * @flow_id: flow_id of clsf filter provided by kernel
 * @fltr_id: filter id of clsf filter returned by adaptor
 * @rq_id: desired rq index
 * @node: hlist_node
 */
struct enic_rfs_fltr_node {
        struct flow_keys keys;
        u32 flow_id;
        u16 fltr_id;
        u16 rq_id;
        struct hlist_node node;
};

/* enic_rfs_flw_tbl - rfs flow table
 * @max: Maximum number of filters vNIC supports
 * @free: Number of free filters available
 * @toclean: hash table index to clean next
 * @ht_head: hash table list head
 * @lock: spin lock
 * @rfs_may_expire: timer function for enic_rps_may_expire_flow
 */
struct enic_rfs_flw_tbl {
        u16 max;
        int free;
#define ENIC_RFS_FLW_BITSHIFT (10)
#define ENIC_RFS_FLW_MASK ((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
        u16 toclean:ENIC_RFS_FLW_BITSHIFT;
        struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
        spinlock_t lock;
        struct timer_list rfs_may_expire;
};
struct vxlan_offload {
        u16 vxlan_udp_port_number;
        u8 patch_level;
        u8 flags;
};

/* Per-instance private data structure */
struct enic {
        struct net_device *netdev;
        struct pci_dev *pdev;
        struct vnic_enet_config config;
        struct vnic_dev_bar bar[ENIC_BARS_MAX];
        struct vnic_dev *vdev;
        struct timer_list notify_timer;
        struct work_struct reset;
        struct work_struct tx_hang_reset;
        struct work_struct change_mtu_work;
        struct msix_entry msix_entry[ENIC_INTR_MAX];
        struct enic_msix_entry msix[ENIC_INTR_MAX];
        u32 msg_enable;
        spinlock_t devcmd_lock;
        u8 mac_addr[ETH_ALEN];
        unsigned int flags;
        unsigned int priv_flags;
        unsigned int mc_count;
        unsigned int uc_count;
        u32 port_mtu;
        struct enic_rx_coal rx_coalesce_setting;
        u32 rx_coalesce_usecs;
        u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
        u16 num_vfs;
#endif
        spinlock_t enic_api_lock;
        bool enic_api_busy;
        struct enic_port_profile *pp;

        /* work queue cache line section */
        ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
        spinlock_t wq_lock[ENIC_WQ_MAX];
        unsigned int wq_count;
        u16 loop_enable;
        u16 loop_tag;

        /* receive queue cache line section */
        ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
        unsigned int rq_count;
        struct vxlan_offload vxlan;
        u64 rq_truncated_pkts;
        u64 rq_bad_fcs;
        struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];

        /* interrupt resource cache line section */
        ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
        unsigned int intr_count;
        u32 __iomem *legacy_pba;	/* memory-mapped */

        /* completion queue cache line section */
        ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
        unsigned int cq_count;
        struct enic_rfs_flw_tbl rfs_h;
        u32 rx_copybreak;
        u8 rss_key[ENIC_RSS_LEN];
        struct vnic_gen_stats gen_stats;
};

static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
{
        struct enic *enic = vdev->priv;

        return enic->netdev;
}
/* wrapper macros for kernel logging */
#define vdev_err(vdev, fmt, ...) \
        dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_warn(vdev, fmt, ...) \
        dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_info(vdev, fmt, ...) \
        dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)

#define vdev_neterr(vdev, fmt, ...) \
        netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netwarn(vdev, fmt, ...) \
        netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netinfo(vdev, fmt, ...) \
        netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)

static inline struct device *enic_get_dev(struct enic *enic)
{
        return &(enic->pdev->dev);
}
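/* Completion queues are laid out with the RQ CQs first (indices
 * [0, rq_count)), followed by the WQ CQs.
 */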
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
        return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
        return enic->rq_count + wq;
}
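/* In INTx mode the driver uses three interrupt resources: 0 for I/O
 * completions, 1 for errors and 2 for notifications.
 */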
static inline unsigned int enic_legacy_io_intr(void)
{
        return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
        return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
        return 2;
}
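/* In MSI-X mode each RQ/WQ uses the vector recorded in its completion
 * queue's interrupt_offset; the error and notify vectors come after the
 * per-queue vectors.
 */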
static inline unsigned int enic_msix_rq_intr(struct enic *enic,
                                             unsigned int rq)
{
        return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic,
                                             unsigned int wq)
{
        return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
        return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
        return enic->rq_count + enic->wq_count + 1;
}

static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                return intr == enic_legacy_err_intr();
        case VNIC_DEV_INTR_MODE_MSIX:
                return intr == enic_msix_err_intr(enic);
        case VNIC_DEV_INTR_MODE_MSI:
        default:
                return false;
        }
}

static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                return intr == enic_legacy_notify_intr();
        case VNIC_DEV_INTR_MODE_MSIX:
                return intr == enic_msix_notify_intr(enic);
        case VNIC_DEV_INTR_MODE_MSI:
        default:
                return false;
        }
}

static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
        if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
                net_warn_ratelimited("%s: PCI dma mapping failed!\n",
                                     enic->netdev->name);
                enic->gen_stats.dma_map_error++;
                return -ENOMEM;
        }

        return 0;
}
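/* Illustrative usage sketch (not part of this header): map a buffer with
 * the generic DMA API and let enic_dma_map_check() log and count a failure.
 *
 *	dma_addr_t dma_addr = dma_map_single(&enic->pdev->dev, buf, len,
 *					     DMA_TO_DEVICE);
 *	if (enic_dma_map_check(enic, dma_addr))
 *		return -ENOMEM;
 */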
void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);

#endif /* _ENIC_H_ */