/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"
#include "qed_dbg_hsi.h"
#include "qed_mfw_hsi.h"

extern const struct qed_common_ops qed_common_ops_pass;

#define STORM_FW_VERSION \
        ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
         (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
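/* Illustrative example (component values are hypothetical): with
 * FW_MAJOR_VERSION = 8, FW_MINOR_VERSION = 59, FW_REVISION_VERSION = 1 and
 * FW_ENGINEERING_VERSION = 0, STORM_FW_VERSION packs to
 * (8 << 24) | (59 << 16) | (1 << 8) | 0 == 0x083b0100 - one byte per
 * component, major version in the most significant byte.
 */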
#define MAX_HWFNS_PER_DEVICE    (4)
#define NAME_SIZE               16
#define VER_SIZE                16
#define QED_WFQ_UNIT            100

#define QED_WID_SIZE            (1024)
#define QED_MIN_WIDS            (4)
#define QED_PF_DEMS_SIZE        (4)

#define QED_LLH_DONT_CARE       0

/* CAU states */
enum qed_coalescing_mode {
        QED_COAL_MODE_DISABLE,
        QED_COAL_MODE_ENABLE
};

enum qed_nvm_cmd {
        QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
        QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
        QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
        QED_GET_MCP_NVM_RESP = 0xFFFFFF00
};

struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
enum qed_mfw_tlv_type;
union qed_mfw_tlv_data;

/* Helpers */
#define QED_MFW_GET_FIELD(name, field) \
        (((name) & (field ## _MASK)) >> (field ## _SHIFT))

#define QED_MFW_SET_FIELD(name, field, value) \
        do { \
                (name) &= ~(field ## _MASK); \
                (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
        } while (0)
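/* Illustrative example (FOO_BAR is a made-up field): the MFW helpers expect
 * matching <FIELD>_MASK / <FIELD>_SHIFT pairs. Given
 *
 *         #define FOO_BAR_MASK  0x00000ff0
 *         #define FOO_BAR_SHIFT 4
 *
 * and u32 reg = 0, QED_MFW_SET_FIELD(reg, FOO_BAR, 0xab) leaves
 * reg == 0x00000ab0, and QED_MFW_GET_FIELD(reg, FOO_BAR) reads back 0xab.
 * Note that the SET helper masks the shifted value, so bits falling outside
 * the field are silently dropped.
 */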
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
        u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
                      (cid * QED_PF_DEMS_SIZE);

        return db_addr;
}

static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
{
        u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
                      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

        return db_addr;
}
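/* Illustrative sketch (values made up): a PF doorbell offset for a given
 * connection ID is derived with qed_db_addr() and then rung through the
 * DOORBELL() macro defined later in this file:
 *
 *         u32 db_offset = qed_db_addr(cid, DQ_DEMS_LEGACY);
 *
 *         DOORBELL(cdev, db_offset, raw_db_data);
 *
 * DQ_DEMS_LEGACY is assumed here to be the usual DEMS selector from the HSI
 * headers; VF code would use qed_db_addr_vf() instead.
 */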
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
        ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \
         ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
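/* Illustrative example: with a hypothetical cache_shift of 6 (64-byte cache
 * lines), a 100-byte type rounds up as (100 + 63) & ~63 == 128, i.e.
 * ALIGNED_TYPE_SIZE() always returns a whole number of cache lines.
 */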
#define for_each_hwfn(cdev, i)  for (i = 0; i < (cdev)->num_hwfns; i++)

#define D_TRINE(val, cond1, cond2, true1, true2, def) \
        ((val) == (cond1) ? true1 : \
         ((val) == (cond2) ? true2 : def))
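/* Illustrative example: the canonical iteration pattern over the hardware
 * functions of a device, used throughout the driver:
 *
 *         struct qed_hwfn *p_hwfn;
 *         int i;
 *
 *         for_each_hwfn(cdev, i) {
 *                 p_hwfn = &cdev->hwfns[i];
 *                 ... per-hwfn work ...
 *         }
 */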
/* Forward declarations */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_ll2_info;
struct qed_mcp_info;
struct qed_llh_info;

struct qed_rt_data {
        u32 *init_val;
        bool *b_valid;
};

enum qed_tunn_mode {
        QED_MODE_L2GENEVE_TUNN,
        QED_MODE_IPGENEVE_TUNN,
        QED_MODE_L2GRE_TUNN,
        QED_MODE_IPGRE_TUNN,
        QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
        QED_TUNN_CLSS_MAC_VLAN,
        QED_TUNN_CLSS_MAC_VNI,
        QED_TUNN_CLSS_INNER_MAC_VLAN,
        QED_TUNN_CLSS_INNER_MAC_VNI,
        QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
        MAX_QED_TUNN_CLSS,
};

struct qed_tunn_update_type {
        bool b_update_mode;
        bool b_mode_enabled;
        enum qed_tunn_clss tun_cls;
};

struct qed_tunn_update_udp_port {
        bool b_update_port;
        u16 port;
};

struct qed_tunnel_info {
        struct qed_tunn_update_type vxlan;
        struct qed_tunn_update_type l2_geneve;
        struct qed_tunn_update_type ip_geneve;
        struct qed_tunn_update_type l2_gre;
        struct qed_tunn_update_type ip_gre;

        struct qed_tunn_update_udp_port vxlan_port;
        struct qed_tunn_update_udp_port geneve_port;

        bool b_update_rx_cls;
        bool b_update_tx_cls;
};

struct qed_tunn_start_params {
        unsigned long tunn_mode;
        u16 vxlan_udp_port;
        u16 geneve_udp_port;
        u8 update_vxlan_udp_port;
        u8 update_geneve_udp_port;
        u8 tunn_clss_vxlan;
        u8 tunn_clss_l2geneve;
        u8 tunn_clss_ipgeneve;
        u8 tunn_clss_l2gre;
        u8 tunn_clss_ipgre;
};

struct qed_tunn_update_params {
        unsigned long tunn_mode_update_mask;
        unsigned long tunn_mode;
        u16 vxlan_udp_port;
        u16 geneve_udp_port;
        u8 update_rx_pf_clss;
        u8 update_tx_pf_clss;
        u8 update_vxlan_udp_port;
        u8 update_geneve_udp_port;
        u8 tunn_clss_vxlan;
        u8 tunn_clss_l2geneve;
        u8 tunn_clss_ipgeneve;
        u8 tunn_clss_l2gre;
        u8 tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous with the protocol ID:
 * 1. All personalities need CORE connections.
 * 2. The Ethernet personality may also support the RoCE/iWARP protocol.
 */
enum qed_pci_personality {
        QED_PCI_ETH,
        QED_PCI_FCOE,
        QED_PCI_ISCSI,
        QED_PCI_NVMETCP,
        QED_PCI_ETH_ROCE,
        QED_PCI_ETH_IWARP,
        QED_PCI_ETH_RDMA,
        QED_PCI_DEFAULT, /* default in shmem */
};

/* All VFs are symmetric; all counters are PF + all VFs */
struct qed_qm_iids {
        u32 cids;
        u32 vf_cids;
        u32 tids;
};

/* HW / FW resources, output of features supported below, most information
 * is received from MFW.
 */
enum qed_resources {
        QED_SB,
        QED_L2_QUEUE,
        QED_VPORT,
        QED_RSS_ENG,
        QED_PQ,
        QED_RL,
        QED_MAC,
        QED_VLAN,
        QED_RDMA_CNQ_RAM,
        QED_ILT,
        QED_LL2_RAM_QUEUE,
        QED_LL2_CTX_QUEUE,
        QED_CMDQS_CQS,
        QED_RDMA_STATS_QUEUE,
        QED_BDQ,
        QED_MAX_RESC,
};

enum QED_FEATURE {
        QED_PF_L2_QUE,
        QED_VF,
        QED_RDMA_CNQ,
        QED_NVMETCP_CQ,
        QED_ISCSI_CQ,
        QED_FCOE_CQ,
        QED_VF_L2_QUE,
        QED_MAX_FEATURES,
};

enum qed_dev_cap {
        QED_DEV_CAP_ETH,
        QED_DEV_CAP_FCOE,
        QED_DEV_CAP_ISCSI,
        QED_DEV_CAP_ROCE,
        QED_DEV_CAP_IWARP,
};

enum qed_wol_support {
        QED_WOL_SUPPORT_NONE,
        QED_WOL_SUPPORT_PME,
};

enum qed_db_rec_exec {
        DB_REC_DRY_RUN,
        DB_REC_REAL_DEAL,
        DB_REC_ONCE,
};

struct qed_hw_info {
        /* PCI personality */
        enum qed_pci_personality personality;
#define QED_IS_RDMA_PERSONALITY(dev) \
        ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
         (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
         (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_ROCE_PERSONALITY(dev) \
        ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
         (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_IWARP_PERSONALITY(dev) \
        ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
         (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_L2_PERSONALITY(dev) \
        ((dev)->hw_info.personality == QED_PCI_ETH || \
         QED_IS_RDMA_PERSONALITY(dev))
#define QED_IS_FCOE_PERSONALITY(dev) \
        ((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev) \
        ((dev)->hw_info.personality == QED_PCI_ISCSI)
#define QED_IS_NVMETCP_PERSONALITY(dev) \
        ((dev)->hw_info.personality == QED_PCI_NVMETCP)

        /* Resource allocation scheme results */
        u32 resc_start[QED_MAX_RESC];
        u32 resc_num[QED_MAX_RESC];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
                                 RESC_NUM(_p_hwfn, resc))

        u32 feat_num[QED_MAX_FEATURES];
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])

        /* Number of traffic classes the HW supports */
        u8 num_hw_tc;

        /* Number of TCs that should be active according to DCBx or the
         * upper-layer driver configuration.
         */
        u8 num_active_tc;

        u8 offload_tc;
        bool offload_tc_set;

        bool multi_tc_roce_en;
#define IS_QED_MULTI_TC_ROCE(p_hwfn) ((p_hwfn)->hw_info.multi_tc_roce_en)

        u32 concrete_fid;
        u16 opaque_fid;
        u16 ovlan;
        u32 part_num[4];

        unsigned char hw_mac_addr[ETH_ALEN];
        u64 node_wwn;
        u64 port_wwn;

        u16 num_fcoe_conns;

        struct qed_igu_info *p_igu_info;

        u32 hw_mode;
        unsigned long device_capabilities;
        u16 mtu;

        enum qed_wol_support b_wol_support;
};
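/* Illustrative example (numbers made up): the RESC_*() accessors describe
 * the contiguous range of a resource owned by this function. If
 * resc_start[QED_VPORT] == 8 and resc_num[QED_VPORT] == 4, the function
 * owns vports 8..11:
 *
 *         RESC_START(p_hwfn, QED_VPORT) == 8
 *         RESC_NUM(p_hwfn, QED_VPORT)   == 4
 *         RESC_END(p_hwfn, QED_VPORT)   == 12 (one past the last vport)
 */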
/* Maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE        0x2000

struct qed_dmae_info {
        /* Mutex for synchronizing access to functions */
        struct mutex mutex;

        u8 channel;

        dma_addr_t completion_word_phys_addr;

        /* The memory location where the DMAE writes the completion
         * value when an operation is finished on this context.
         */
        u32 *p_completion_word;

        dma_addr_t intermediate_buffer_phys_addr;

        /* An intermediate buffer for DMAE operations that use virtual
         * addresses - data is DMA'd to/from this buffer and then
         * memcpy'd to/from the virtual address.
         */
        u32 *p_intermediate_buffer;

        dma_addr_t dmae_cmd_phys_addr;
        struct dmae_cmd *p_dmae_cmd;
};

struct qed_wfq_data {
        /* When the feature is configured for at least 1 vport */
        u32 min_speed;
        bool configured;
};

struct qed_qm_info {
        struct init_qm_pq_params *qm_pq_params;
        struct init_qm_vport_params *qm_vport_params;
        struct init_qm_port_params *qm_port_params;
        u16 start_pq;
        u8 start_vport;
        u16 pure_lb_pq;
        u16 first_ofld_pq;
        u16 first_llt_pq;
        u16 pure_ack_pq;
        u16 ooo_pq;
        u16 first_vf_pq;
        u16 first_mcos_pq;
        u16 first_rl_pq;
        u16 num_pqs;
        u16 num_vf_pqs;
        u8 num_vports;
        u8 max_phys_tcs_per_port;
        u8 ooo_tc;
        bool pf_rl_en;
        bool pf_wfq_en;
        bool vport_rl_en;
        bool vport_wfq_en;
        u8 pf_wfq;
        u32 pf_rl;
        struct qed_wfq_data *wfq_data;
        u8 num_pf_rls;
};

#define QED_OVERFLOW_BIT        1

struct qed_db_recovery_info {
        struct list_head list;

        /* Lock to protect the doorbell recovery mechanism list */
        spinlock_t lock;
        bool dorq_attn;
        u32 db_recovery_counter;
        unsigned long overflow;
};
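/* Illustrative sketch (not the driver's exact flow): 'overflow' is used as
 * a bitmap via the atomic bitops, with QED_OVERFLOW_BIT marking a doorbell
 * overflow in progress so that recovery runs only once:
 *
 *         if (!test_and_set_bit(QED_OVERFLOW_BIT,
 *                               &p_hwfn->db_recovery_info.overflow))
 *                 ... handle the overflow / run doorbell recovery ...
 */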
struct storm_stats {
        u32 address;
        u32 len;
};

struct qed_storm_stats {
        struct storm_stats mstats;
        struct storm_stats pstats;
        struct storm_stats tstats;
        struct storm_stats ustats;
};

struct qed_fw_data {
        struct fw_ver_info *fw_ver_info;
        const u8 *modes_tree_buf;
        union init_op *init_ops;
        const u32 *arr_data;
        const u32 *fw_overlays;
        u32 fw_overlays_len;
        u32 init_ops_size;
};

enum qed_mf_mode_bit {
        /* Supports PF-classification based on tag */
        QED_MF_OVLAN_CLSS,

        /* Supports PF-classification based on MAC */
        QED_MF_LLH_MAC_CLSS,

        /* Supports PF-classification based on protocol type */
        QED_MF_LLH_PROTO_CLSS,

        /* Requires a default PF to be set */
        QED_MF_NEED_DEF_PF,

        /* Allow LL2 to multicast/broadcast */
        QED_MF_LL2_NON_UNICAST,

        /* Allow cross-PF [& child VFs] Tx-switching */
        QED_MF_INTER_PF_SWITCH,

        /* Unified Fabric Port support enabled */
        QED_MF_UFP_SPECIFIC,

        /* Disable Accelerated Receive Flow Steering (aRFS) */
        QED_MF_DISABLE_ARFS,

        /* Use vlan for steering */
        QED_MF_8021Q_TAGGING,

        /* Use stag for steering */
        QED_MF_8021AD_TAGGING,

        /* Allow DSCP to TC mapping */
        QED_MF_DSCP_TO_TC_MAP,

        /* Do not insert a vlan tag with id 0 */
        QED_MF_DONT_ADD_VLAN0_TAG,
};
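/* Illustrative example: these enumerators index the cdev->mf_bits bitmap
 * (declared in struct qed_dev below), so multi-function capabilities are
 * checked with the standard bitops:
 *
 *         if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits))
 *                 ... Unified Fabric Port specific handling ...
 */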
enum qed_ufp_mode {
        QED_UFP_MODE_ETS,
        QED_UFP_MODE_VNIC_BW,
        QED_UFP_MODE_UNKNOWN
};

enum qed_ufp_pri_type {
        QED_UFP_PRI_OS,
        QED_UFP_PRI_VNIC,
        QED_UFP_PRI_UNKNOWN
};

struct qed_ufp_info {
        enum qed_ufp_pri_type pri_type;
        enum qed_ufp_mode mode;
        u8 tc;
};

enum BAR_ID {
        BAR_ID_0,       /* Used for GRC */
        BAR_ID_1        /* Used for doorbells */
};

struct qed_nvm_image_info {
        u32 num_images;
        struct bist_nvm_image_att *image_att;
        bool valid;
};

enum qed_hsi_def_type {
        QED_HSI_DEF_MAX_NUM_VFS,
        QED_HSI_DEF_MAX_NUM_L2_QUEUES,
        QED_HSI_DEF_MAX_NUM_PORTS,
        QED_HSI_DEF_MAX_SB_PER_PATH,
        QED_HSI_DEF_MAX_NUM_PFS,
        QED_HSI_DEF_MAX_NUM_VPORTS,
        QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
        QED_HSI_DEF_MAX_QM_TX_QUEUES,
        QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
        QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
        QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
        QED_HSI_DEF_MAX_PBF_CMD_LINES,
        QED_HSI_DEF_MAX_BTB_BLOCKS,
        QED_NUM_HSI_DEFS
};

struct qed_simd_fp_handler {
        void *token;
        void (*func)(void *cookie);
};

enum qed_slowpath_wq_flag {
        QED_SLOWPATH_MFW_TLV_REQ,
        QED_SLOWPATH_PERIODIC_DB_REC,
};

struct qed_hwfn {
        struct qed_dev *cdev;
        u8 my_id;               /* ID inside the PF */
#define IS_LEAD_HWFN(edev)      (!((edev)->my_id))
        u8 rel_pf_id;           /* Relative to engine */
        u8 abs_pf_id;
#define QED_PATH_ID(_p_hwfn) \
        (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
        u8 port_id;
        bool b_active;

        u32 dp_module;
        u8 dp_level;
        char name[NAME_SIZE];

        bool hw_init_done;

        u8 num_funcs_on_engine;
        u8 enabled_func_idx;

        /* BAR access */
        void __iomem *regview;
        void __iomem *doorbells;
        u64 db_phys_addr;
        unsigned long db_size;

        /* PTT pool */
        struct qed_ptt_pool *p_ptt_pool;

        /* HW info */
        struct qed_hw_info hw_info;

        /* rt_array (for init-tool) */
        struct qed_rt_data rt_data;

        /* SPQ */
        struct qed_spq *p_spq;

        /* EQ */
        struct qed_eq *p_eq;

        /* Consolidation queue */
        struct qed_consq *p_consq;

        /* Slow-Path definitions */
        struct tasklet_struct sp_dpc;
        bool b_sp_dpc_enabled;

        struct qed_ptt *p_main_ptt;
        struct qed_ptt *p_dpc_ptt;

        /* PTP will be used only by the leading function.
         * Usage of all PTP-APIs should be synchronized as a result.
         */
        struct qed_ptt *p_ptp_ptt;

        struct qed_sb_sp_info *p_sp_sb;
        struct qed_sb_attn_info *p_sb_attn;

        /* Protocol related */
        bool using_ll2;
        struct qed_ll2_info *p_ll2_info;
        struct qed_ooo_info *p_ooo_info;
        struct qed_rdma_info *p_rdma_info;
        struct qed_iscsi_info *p_iscsi_info;
        struct qed_nvmetcp_info *p_nvmetcp_info;
        struct qed_fcoe_info *p_fcoe_info;
        struct qed_pf_params pf_params;

        bool b_rdma_enabled_in_prs;
        u32 rdma_prs_search_reg;

        struct qed_cxt_mngr *p_cxt_mngr;

        /* Flag indicating whether interrupts are enabled or not */
        bool b_int_enabled;
        bool b_int_requested;

        /* True if the driver requested the link */
        bool b_drv_link_init;

        struct qed_vf_iov *vf_iov_info;
        struct qed_pf_iov *pf_iov_info;
        struct qed_mcp_info *mcp_info;

        struct qed_dcbx_info *p_dcbx_info;

        struct qed_ufp_info ufp_info;

        struct qed_dmae_info dmae_info;

        /* QM init */
        struct qed_qm_info qm_info;
        struct qed_storm_stats storm_stats;

        /* Buffer for unzipping firmware data */
        void *unzip_buf;

        struct dbg_tools_data dbg_info;
        void *dbg_user_info;
        struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];

        /* PWM region specific data */
        u16 wid_count;
        u32 dpi_size;
        u32 dpi_count;

        /* This is used to calculate the doorbell address */
        u32 dpi_start_offset;

        /* If one of the following is set then EDPM shouldn't be used */
        u8 dcbx_no_edpm;
        u8 db_bar_no_edpm;

        /* L2-related */
        struct qed_l2_info *p_l2_info;

        /* Mechanism for recovering from doorbell drop */
        struct qed_db_recovery_info db_recovery_info;

        /* NVM images number and attributes */
        struct qed_nvm_image_info nvm_info;

        struct phys_mem_desc *fw_overlay_mem;
        struct qed_ptt *p_arfs_ptt;

        struct qed_simd_fp_handler simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
        struct workqueue_struct *iov_wq;
        struct delayed_work iov_task;
        unsigned long iov_task_flags;
#endif
        struct z_stream_s *stream;
        bool slowpath_wq_active;
        struct workqueue_struct *slowpath_wq;
        struct delayed_work slowpath_task;
        unsigned long slowpath_task_flags;
        u32 periodic_db_rec_count;
};
struct pci_params {
        int pm_cap;

        unsigned long mem_start;
        unsigned long mem_end;
        unsigned int irq;
        u8 pf_num;
};

struct qed_int_param {
        u32 int_mode;
        u8 num_vectors;
        u8 min_msix_cnt;        /* for minimal functionality */
};

struct qed_int_params {
        struct qed_int_param in;
        struct qed_int_param out;
        struct msix_entry *msix_table;
        bool fp_initialized;
        u8 fp_msix_base;
        u8 fp_msix_cnt;
        u8 rdma_msix_base;
        u8 rdma_msix_cnt;
};

struct qed_dbg_feature {
        struct dentry *dentry;
        u8 *dump_buf;
        u32 buf_size;
        u32 dumped_dwords;
};

struct qed_dev {
        u32 dp_module;
        u8 dp_level;
        char name[NAME_SIZE];

        enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)          ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev)          ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev)          QED_IS_AH(dev)

        u16 vendor_id;
        u16 device_id;
#define QED_DEV_ID_MASK         0xff00
#define QED_DEV_ID_MASK_BB      0x1600
#define QED_DEV_ID_MASK_AH      0x8000

        u16 chip_num;
#define CHIP_NUM_MASK           0xffff
#define CHIP_NUM_SHIFT          16

        u16 chip_rev;
#define CHIP_REV_MASK           0xf
#define CHIP_REV_SHIFT          12
#define CHIP_REV_IS_B0(_cdev)   ((_cdev)->chip_rev == 1)

        u16 chip_metal;
#define CHIP_METAL_MASK         0xff
#define CHIP_METAL_SHIFT        4

        u16 chip_bond_id;
#define CHIP_BOND_ID_MASK       0xf
#define CHIP_BOND_ID_SHIFT      0

        u8 num_engines;
        u8 num_ports;
        u8 num_ports_in_engine;
        u8 num_funcs_in_port;

        u8 path_id;

        unsigned long mf_bits;

        int pcie_width;
        int pcie_speed;

        /* Add MF related configuration */
        u8 mcp_rev;
        u8 boot_mode;

        /* WoL related configurations */
        u8 wol_config;
        u8 wol_mac[ETH_ALEN];

        u32 int_mode;
        enum qed_coalescing_mode int_coalescing_mode;
        u16 rx_coalesce_usecs;
        u16 tx_coalesce_usecs;

        /* Start BAR offset of the first hwfn */
        void __iomem *regview;
        void __iomem *doorbells;
        u64 db_phys_addr;
        unsigned long db_size;

        /* PCI */
        u8 cache_shift;

        /* Init */
        const u32 *iro_arr;
#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)

        /* HW functions */
        u8 num_hwfns;
        struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];

        /* Engine affinity */
        u8 l2_affin_hint;
        u8 fir_affin;
        u8 iwarp_affin;

        /* SRIOV */
        struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev)      (!!(cdev)->p_iov_info)
        struct qed_tunnel_info tunnel;
        bool b_is_vf;
        u32 drv_type;
        struct qed_eth_stats *reset_stats;
        struct qed_fw_data *fw_data;

        u32 mcp_nvm_resp;

        /* Recovery */
        bool recov_in_prog;

        /* Indicates whether attentions should be prevented from being
         * reasserted
         */
        bool attn_clr_en;

        /* LLH info */
        u8 ppfid_bitmap;
        struct qed_llh_info *p_llh_info;

        /* Linux specific here */
        struct qed_dev_info common_dev_info;
        struct qede_dev *edev;
        struct pci_dev *pdev;
        u32 flags;
#define QED_FLAG_STORAGE_STARTED        (BIT(0))

        int msg_enable;

        struct pci_params pci_params;

        struct qed_int_params int_params;

        u8 protocol;
#define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)
#define IS_QED_FCOE_IF(cdev)    ((cdev)->protocol == QED_PROTOCOL_FCOE)

        /* Callbacks to protocol driver */
        union {
                struct qed_common_cb_ops *common;
                struct qed_eth_cb_ops *eth;
                struct qed_fcoe_cb_ops *fcoe;
                struct qed_iscsi_cb_ops *iscsi;
                struct qed_nvmetcp_cb_ops *nvmetcp;
        } protocol_ops;
        void *ops_cookie;

#ifdef CONFIG_QED_LL2
        struct qed_cb_ll2_info *ll2;
        u8 ll2_mac_address[ETH_ALEN];
#endif
        struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
        u8 engine_for_debug;
        bool disable_ilt_dump;
        bool dbg_bin_dump;

        DECLARE_HASHTABLE(connections, 10);
        const struct firmware *firmware;

        bool print_dbg_data;

        u32 rdma_max_sge;
        u32 rdma_max_inline;
        u32 rdma_max_srq_sge;
        u16 tunn_feature_mask;

        bool iwarp_cmt;
};

u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);

#define NUM_OF_VFS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
#define NUM_OF_L2_QUEUES(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
#define NUM_OF_PORTS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
#define NUM_OF_SBS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
#define NUM_OF_ENG_PFS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
#define NUM_OF_VPORTS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
#define NUM_OF_RSS_ENGINES(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
#define NUM_OF_QM_TX_QUEUES(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
#define NUM_OF_PXP_ILT_RECORDS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
#define NUM_OF_QM_GLOBAL_RLS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
#define NUM_OF_PBF_CMD_LINES(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
#define NUM_OF_BTB_BLOCKS(dev) \
        qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
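/* Illustrative example: these wrappers keep callers chip-agnostic, e.g.
 *
 *         u32 max_vfs = NUM_OF_VFS(cdev);
 *         u32 max_sbs = NUM_OF_SBS(cdev);
 *
 * where qed_get_hsi_def_val() is assumed to resolve the HSI constant that
 * matches the device's chip family at runtime.
 */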
/**
 * qed_concrete_to_sw_fid(): Get the SW function ID from
 *                           the concrete value.
 *
 * @cdev: Qed dev pointer.
 * @concrete_fid: Concrete fid.
 *
 * Return: The software function ID (u8).
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
                                        u32 concrete_fid)
{
        u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
        u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
        u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
        u8 sw_fid;

        if (vf_valid)
                sw_fid = vfid + MAX_NUM_PFS;
        else
                sw_fid = pfid;

        return sw_fid;
}
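/* Illustrative example (the MAX_NUM_PFS value is hypothetical): the SW FID
 * space places all PFs first, followed by the VFs. With MAX_NUM_PFS == 16,
 * a concrete FID decoding to vf_valid == 1 and vfid == 5 yields
 * sw_fid == 5 + 16 == 21, while a PF with pfid == 3 (vf_valid == 0) simply
 * yields sw_fid == 3.
 */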
#define PKT_LB_TC       9

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
                                         struct qed_ptt *p_ptt,
                                         u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_device_num_engines(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
                         __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);

#define QED_LEADING_HWFN(dev)   (&(dev)->hwfns[0])
#define QED_IS_CMT(dev)         ((dev)->num_hwfns > 1)
/* Macros for getting the engine-affinitized hwfn (FIR: FCoE, iSCSI, RoCE) */
#define QED_FIR_AFFIN_HWFN(dev)         (&(dev)->hwfns[dev->fir_affin])
#define QED_IWARP_AFFIN_HWFN(dev)       (&(dev)->hwfns[dev->iwarp_affin])
#define QED_AFFIN_HWFN(dev) \
        (QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \
         QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev))
#define QED_AFFIN_HWFN_IDX(dev) (IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 0 : 1)

/* Flags for indication of required queues */
#define PQ_FLAGS_RLS    (BIT(0))
#define PQ_FLAGS_MCOS   (BIT(1))
#define PQ_FLAGS_LB     (BIT(2))
#define PQ_FLAGS_OOO    (BIT(3))
#define PQ_FLAGS_ACK    (BIT(4))
#define PQ_FLAGS_OFLD   (BIT(5))
#define PQ_FLAGS_VFS    (BIT(6))
#define PQ_FLAGS_LLT    (BIT(7))
#define PQ_FLAGS_MTC    (BIT(8))

/* Physical queue index for CM context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
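/* Illustrative example: a single PQ_FLAGS_* bit selects which physical
 * queue index is returned, e.g. for the pure-loopback queue:
 *
 *         u16 pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
 *
 * The per-TC and per-VF variants above resolve queues that come in groups
 * (traffic classes, VFs).
 */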
/* Doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);

#define GET_GTT_REG_ADDR(__base, __offset, __idx) \
        ((__base) + __offset ## _GTT_OFFSET((__idx)))

#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \
        ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx)))
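/* Illustrative example (FOO_BAR is a made-up register): the GTT helpers
 * paste "_GTT_OFFSET" onto their second argument, so
 *
 *         GET_GTT_REG_ADDR(base, FOO_BAR, 2)
 *
 * expands to ((base) + FOO_BAR_GTT_OFFSET((2))), assuming the HSI headers
 * generate a FOO_BAR_GTT_OFFSET(idx) macro.
 */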
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

#define REG_ADDR(cdev, offset)          ((void __iomem *)((u8 __iomem *) \
                                                ((cdev)->regview) + \
                                                (offset)))

#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val) \
        writel((u32)val, (void __iomem *)((u8 __iomem *) \
                                          ((cdev)->doorbells) + (db_addr)))
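/* Illustrative example (REG_OFFSET_EXAMPLE is a made-up offset): a
 * read-modify-write of a device register through the wrappers above:
 *
 *         u32 val = REG_RD(cdev, REG_OFFSET_EXAMPLE);
 *
 *         REG_WR(cdev, REG_OFFSET_EXAMPLE, val | BIT(0));
 *
 * Doorbells go through DOORBELL() instead, which targets the doorbell BAR
 * rather than the register BAR.
 */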
#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id % \
                                 qed_device_num_ports((_p_hwfn)->cdev))
int qed_device_num_ports(struct qed_dev *cdev);

/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
                   u32 input_len, u8 *input_buf,
                   u32 max_size, u8 *unzip_buf);
int qed_recovery_process(struct qed_dev *cdev);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
                           enum qed_hw_err_type err_type);
void qed_get_protocol_stats(struct qed_dev *cdev,
                            enum qed_mcp_protocol_type type,
                            union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
int qed_mfw_tlv_req(struct qed_hwfn *hwfn);

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
                          enum qed_mfw_tlv_type type,
                          union qed_mfw_tlv_data *tlv_data);

void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);

int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
void qed_llh_clear_all_filters(struct qed_dev *cdev);
unsigned long qed_get_epoch_time(void);

#endif /* _QED_H */