/* NOTE(review): removed web-extraction artifacts (page title and
 * concatenated line-number runs) that preceded the actual header text.
 */
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
  6. #ifndef _QED_IF_H
  7. #define _QED_IF_H
  8. #include <linux/ethtool.h>
  9. #include <linux/types.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/netdevice.h>
  12. #include <linux/pci.h>
  13. #include <linux/skbuff.h>
  14. #include <asm/byteorder.h>
  15. #include <linux/io.h>
  16. #include <linux/compiler.h>
  17. #include <linux/kernel.h>
  18. #include <linux/list.h>
  19. #include <linux/slab.h>
  20. #include <linux/qed/common_hsi.h>
  21. #include <linux/qed/qed_chain.h>
  22. #include <linux/io-64-nonatomic-lo-hi.h>
  23. #include <net/devlink.h>
  24. #define QED_TX_SWS_TIMER_DFLT 500
  25. #define QED_TWO_MSL_TIMER_DFLT 4000
  26. enum dcbx_protocol_type {
  27. DCBX_PROTOCOL_ISCSI,
  28. DCBX_PROTOCOL_FCOE,
  29. DCBX_PROTOCOL_ROCE,
  30. DCBX_PROTOCOL_ROCE_V2,
  31. DCBX_PROTOCOL_ETH,
  32. DCBX_MAX_PROTOCOL_TYPE
  33. };
  34. #define QED_ROCE_PROTOCOL_INDEX (3)
  35. #define QED_LLDP_CHASSIS_ID_STAT_LEN 4
  36. #define QED_LLDP_PORT_ID_STAT_LEN 4
  37. #define QED_DCBX_MAX_APP_PROTOCOL 32
  38. #define QED_MAX_PFC_PRIORITIES 8
  39. #define QED_DCBX_DSCP_SIZE 64
  40. struct qed_dcbx_lldp_remote {
  41. u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
  42. u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
  43. bool enable_rx;
  44. bool enable_tx;
  45. u32 tx_interval;
  46. u32 max_credit;
  47. };
  48. struct qed_dcbx_lldp_local {
  49. u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
  50. u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
  51. };
  52. struct qed_dcbx_app_prio {
  53. u8 roce;
  54. u8 roce_v2;
  55. u8 fcoe;
  56. u8 iscsi;
  57. u8 eth;
  58. };
  59. struct qed_dbcx_pfc_params {
  60. bool willing;
  61. bool enabled;
  62. u8 prio[QED_MAX_PFC_PRIORITIES];
  63. u8 max_tc;
  64. };
  65. enum qed_dcbx_sf_ieee_type {
  66. QED_DCBX_SF_IEEE_ETHTYPE,
  67. QED_DCBX_SF_IEEE_TCP_PORT,
  68. QED_DCBX_SF_IEEE_UDP_PORT,
  69. QED_DCBX_SF_IEEE_TCP_UDP_PORT
  70. };
  71. struct qed_app_entry {
  72. bool ethtype;
  73. enum qed_dcbx_sf_ieee_type sf_ieee;
  74. bool enabled;
  75. u8 prio;
  76. u16 proto_id;
  77. enum dcbx_protocol_type proto_type;
  78. };
  79. struct qed_dcbx_params {
  80. struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
  81. u16 num_app_entries;
  82. bool app_willing;
  83. bool app_valid;
  84. bool app_error;
  85. bool ets_willing;
  86. bool ets_enabled;
  87. bool ets_cbs;
  88. bool valid;
  89. u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
  90. u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
  91. u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
  92. struct qed_dbcx_pfc_params pfc;
  93. u8 max_ets_tc;
  94. };
  95. struct qed_dcbx_admin_params {
  96. struct qed_dcbx_params params;
  97. bool valid;
  98. };
  99. struct qed_dcbx_remote_params {
  100. struct qed_dcbx_params params;
  101. bool valid;
  102. };
  103. struct qed_dcbx_operational_params {
  104. struct qed_dcbx_app_prio app_prio;
  105. struct qed_dcbx_params params;
  106. bool valid;
  107. bool enabled;
  108. bool ieee;
  109. bool cee;
  110. bool local;
  111. u32 err;
  112. };
  113. struct qed_dcbx_get {
  114. struct qed_dcbx_operational_params operational;
  115. struct qed_dcbx_lldp_remote lldp_remote;
  116. struct qed_dcbx_lldp_local lldp_local;
  117. struct qed_dcbx_remote_params remote;
  118. struct qed_dcbx_admin_params local;
  119. };
  120. enum qed_nvm_images {
  121. QED_NVM_IMAGE_ISCSI_CFG,
  122. QED_NVM_IMAGE_FCOE_CFG,
  123. QED_NVM_IMAGE_MDUMP,
  124. QED_NVM_IMAGE_NVM_CFG1,
  125. QED_NVM_IMAGE_DEFAULT_CFG,
  126. QED_NVM_IMAGE_NVM_META,
  127. };
  128. struct qed_link_eee_params {
  129. u32 tx_lpi_timer;
  130. #define QED_EEE_1G_ADV BIT(0)
  131. #define QED_EEE_10G_ADV BIT(1)
  132. /* Capabilities are represented using QED_EEE_*_ADV values */
  133. u8 adv_caps;
  134. u8 lp_adv_caps;
  135. bool enable;
  136. bool tx_lpi_enable;
  137. };
  138. enum qed_led_mode {
  139. QED_LED_MODE_OFF,
  140. QED_LED_MODE_ON,
  141. QED_LED_MODE_RESTORE
  142. };
  143. struct qed_mfw_tlv_eth {
  144. u16 lso_maxoff_size;
  145. bool lso_maxoff_size_set;
  146. u16 lso_minseg_size;
  147. bool lso_minseg_size_set;
  148. u8 prom_mode;
  149. bool prom_mode_set;
  150. u16 tx_descr_size;
  151. bool tx_descr_size_set;
  152. u16 rx_descr_size;
  153. bool rx_descr_size_set;
  154. u16 netq_count;
  155. bool netq_count_set;
  156. u32 tcp4_offloads;
  157. bool tcp4_offloads_set;
  158. u32 tcp6_offloads;
  159. bool tcp6_offloads_set;
  160. u16 tx_descr_qdepth;
  161. bool tx_descr_qdepth_set;
  162. u16 rx_descr_qdepth;
  163. bool rx_descr_qdepth_set;
  164. u8 iov_offload;
  165. #define QED_MFW_TLV_IOV_OFFLOAD_NONE (0)
  166. #define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1)
  167. #define QED_MFW_TLV_IOV_OFFLOAD_VEB (2)
  168. #define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3)
  169. bool iov_offload_set;
  170. u8 txqs_empty;
  171. bool txqs_empty_set;
  172. u8 rxqs_empty;
  173. bool rxqs_empty_set;
  174. u8 num_txqs_full;
  175. bool num_txqs_full_set;
  176. u8 num_rxqs_full;
  177. bool num_rxqs_full_set;
  178. };
  179. #define QED_MFW_TLV_TIME_SIZE 14
  180. struct qed_mfw_tlv_time {
  181. bool b_set;
  182. u8 month;
  183. u8 day;
  184. u8 hour;
  185. u8 min;
  186. u16 msec;
  187. u16 usec;
  188. };
  189. struct qed_mfw_tlv_fcoe {
  190. u8 scsi_timeout;
  191. bool scsi_timeout_set;
  192. u32 rt_tov;
  193. bool rt_tov_set;
  194. u32 ra_tov;
  195. bool ra_tov_set;
  196. u32 ed_tov;
  197. bool ed_tov_set;
  198. u32 cr_tov;
  199. bool cr_tov_set;
  200. u8 boot_type;
  201. bool boot_type_set;
  202. u8 npiv_state;
  203. bool npiv_state_set;
  204. u32 num_npiv_ids;
  205. bool num_npiv_ids_set;
  206. u8 switch_name[8];
  207. bool switch_name_set;
  208. u16 switch_portnum;
  209. bool switch_portnum_set;
  210. u8 switch_portid[3];
  211. bool switch_portid_set;
  212. u8 vendor_name[8];
  213. bool vendor_name_set;
  214. u8 switch_model[8];
  215. bool switch_model_set;
  216. u8 switch_fw_version[8];
  217. bool switch_fw_version_set;
  218. u8 qos_pri;
  219. bool qos_pri_set;
  220. u8 port_alias[3];
  221. bool port_alias_set;
  222. u8 port_state;
  223. #define QED_MFW_TLV_PORT_STATE_OFFLINE (0)
  224. #define QED_MFW_TLV_PORT_STATE_LOOP (1)
  225. #define QED_MFW_TLV_PORT_STATE_P2P (2)
  226. #define QED_MFW_TLV_PORT_STATE_FABRIC (3)
  227. bool port_state_set;
  228. u16 fip_tx_descr_size;
  229. bool fip_tx_descr_size_set;
  230. u16 fip_rx_descr_size;
  231. bool fip_rx_descr_size_set;
  232. u16 link_failures;
  233. bool link_failures_set;
  234. u8 fcoe_boot_progress;
  235. bool fcoe_boot_progress_set;
  236. u64 rx_bcast;
  237. bool rx_bcast_set;
  238. u64 tx_bcast;
  239. bool tx_bcast_set;
  240. u16 fcoe_txq_depth;
  241. bool fcoe_txq_depth_set;
  242. u16 fcoe_rxq_depth;
  243. bool fcoe_rxq_depth_set;
  244. u64 fcoe_rx_frames;
  245. bool fcoe_rx_frames_set;
  246. u64 fcoe_rx_bytes;
  247. bool fcoe_rx_bytes_set;
  248. u64 fcoe_tx_frames;
  249. bool fcoe_tx_frames_set;
  250. u64 fcoe_tx_bytes;
  251. bool fcoe_tx_bytes_set;
  252. u16 crc_count;
  253. bool crc_count_set;
  254. u32 crc_err_src_fcid[5];
  255. bool crc_err_src_fcid_set[5];
  256. struct qed_mfw_tlv_time crc_err[5];
  257. u16 losync_err;
  258. bool losync_err_set;
  259. u16 losig_err;
  260. bool losig_err_set;
  261. u16 primtive_err;
  262. bool primtive_err_set;
  263. u16 disparity_err;
  264. bool disparity_err_set;
  265. u16 code_violation_err;
  266. bool code_violation_err_set;
  267. u32 flogi_param[4];
  268. bool flogi_param_set[4];
  269. struct qed_mfw_tlv_time flogi_tstamp;
  270. u32 flogi_acc_param[4];
  271. bool flogi_acc_param_set[4];
  272. struct qed_mfw_tlv_time flogi_acc_tstamp;
  273. u32 flogi_rjt;
  274. bool flogi_rjt_set;
  275. struct qed_mfw_tlv_time flogi_rjt_tstamp;
  276. u32 fdiscs;
  277. bool fdiscs_set;
  278. u8 fdisc_acc;
  279. bool fdisc_acc_set;
  280. u8 fdisc_rjt;
  281. bool fdisc_rjt_set;
  282. u8 plogi;
  283. bool plogi_set;
  284. u8 plogi_acc;
  285. bool plogi_acc_set;
  286. u8 plogi_rjt;
  287. bool plogi_rjt_set;
  288. u32 plogi_dst_fcid[5];
  289. bool plogi_dst_fcid_set[5];
  290. struct qed_mfw_tlv_time plogi_tstamp[5];
  291. u32 plogi_acc_src_fcid[5];
  292. bool plogi_acc_src_fcid_set[5];
  293. struct qed_mfw_tlv_time plogi_acc_tstamp[5];
  294. u8 tx_plogos;
  295. bool tx_plogos_set;
  296. u8 plogo_acc;
  297. bool plogo_acc_set;
  298. u8 plogo_rjt;
  299. bool plogo_rjt_set;
  300. u32 plogo_src_fcid[5];
  301. bool plogo_src_fcid_set[5];
  302. struct qed_mfw_tlv_time plogo_tstamp[5];
  303. u8 rx_logos;
  304. bool rx_logos_set;
  305. u8 tx_accs;
  306. bool tx_accs_set;
  307. u8 tx_prlis;
  308. bool tx_prlis_set;
  309. u8 rx_accs;
  310. bool rx_accs_set;
  311. u8 tx_abts;
  312. bool tx_abts_set;
  313. u8 rx_abts_acc;
  314. bool rx_abts_acc_set;
  315. u8 rx_abts_rjt;
  316. bool rx_abts_rjt_set;
  317. u32 abts_dst_fcid[5];
  318. bool abts_dst_fcid_set[5];
  319. struct qed_mfw_tlv_time abts_tstamp[5];
  320. u8 rx_rscn;
  321. bool rx_rscn_set;
  322. u32 rx_rscn_nport[4];
  323. bool rx_rscn_nport_set[4];
  324. u8 tx_lun_rst;
  325. bool tx_lun_rst_set;
  326. u8 abort_task_sets;
  327. bool abort_task_sets_set;
  328. u8 tx_tprlos;
  329. bool tx_tprlos_set;
  330. u8 tx_nos;
  331. bool tx_nos_set;
  332. u8 rx_nos;
  333. bool rx_nos_set;
  334. u8 ols;
  335. bool ols_set;
  336. u8 lr;
  337. bool lr_set;
  338. u8 lrr;
  339. bool lrr_set;
  340. u8 tx_lip;
  341. bool tx_lip_set;
  342. u8 rx_lip;
  343. bool rx_lip_set;
  344. u8 eofa;
  345. bool eofa_set;
  346. u8 eofni;
  347. bool eofni_set;
  348. u8 scsi_chks;
  349. bool scsi_chks_set;
  350. u8 scsi_cond_met;
  351. bool scsi_cond_met_set;
  352. u8 scsi_busy;
  353. bool scsi_busy_set;
  354. u8 scsi_inter;
  355. bool scsi_inter_set;
  356. u8 scsi_inter_cond_met;
  357. bool scsi_inter_cond_met_set;
  358. u8 scsi_rsv_conflicts;
  359. bool scsi_rsv_conflicts_set;
  360. u8 scsi_tsk_full;
  361. bool scsi_tsk_full_set;
  362. u8 scsi_aca_active;
  363. bool scsi_aca_active_set;
  364. u8 scsi_tsk_abort;
  365. bool scsi_tsk_abort_set;
  366. u32 scsi_rx_chk[5];
  367. bool scsi_rx_chk_set[5];
  368. struct qed_mfw_tlv_time scsi_chk_tstamp[5];
  369. };
  370. struct qed_mfw_tlv_iscsi {
  371. u8 target_llmnr;
  372. bool target_llmnr_set;
  373. u8 header_digest;
  374. bool header_digest_set;
  375. u8 data_digest;
  376. bool data_digest_set;
  377. u8 auth_method;
  378. #define QED_MFW_TLV_AUTH_METHOD_NONE (1)
  379. #define QED_MFW_TLV_AUTH_METHOD_CHAP (2)
  380. #define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3)
  381. bool auth_method_set;
  382. u16 boot_taget_portal;
  383. bool boot_taget_portal_set;
  384. u16 frame_size;
  385. bool frame_size_set;
  386. u16 tx_desc_size;
  387. bool tx_desc_size_set;
  388. u16 rx_desc_size;
  389. bool rx_desc_size_set;
  390. u8 boot_progress;
  391. bool boot_progress_set;
  392. u16 tx_desc_qdepth;
  393. bool tx_desc_qdepth_set;
  394. u16 rx_desc_qdepth;
  395. bool rx_desc_qdepth_set;
  396. u64 rx_frames;
  397. bool rx_frames_set;
  398. u64 rx_bytes;
  399. bool rx_bytes_set;
  400. u64 tx_frames;
  401. bool tx_frames_set;
  402. u64 tx_bytes;
  403. bool tx_bytes_set;
  404. };
  405. enum qed_db_rec_width {
  406. DB_REC_WIDTH_32B,
  407. DB_REC_WIDTH_64B,
  408. };
  409. enum qed_db_rec_space {
  410. DB_REC_KERNEL,
  411. DB_REC_USER,
  412. };
  413. #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
  414. (void __iomem *)(reg_addr))
  415. #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
  416. #define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
  417. (void __iomem *)(reg_addr))
  418. #define QED_COALESCE_MAX 0x1FF
  419. #define QED_DEFAULT_RX_USECS 12
  420. #define QED_DEFAULT_TX_USECS 48
  421. /* forward */
  422. struct qed_dev;
  423. struct qed_eth_pf_params {
  424. /* The following parameters are used during HW-init
  425. * and these parameters need to be passed as arguments
  426. * to update_pf_params routine invoked before slowpath start
  427. */
  428. u16 num_cons;
  429. /* per-VF number of CIDs */
  430. u8 num_vf_cons;
  431. #define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
  432. /* To enable arfs, previous to HW-init a positive number needs to be
  433. * set [as filters require allocated searcher ILT memory].
  434. * This will set the maximal number of configured steering-filters.
  435. */
  436. u32 num_arfs_filters;
  437. };
  438. struct qed_fcoe_pf_params {
  439. /* The following parameters are used during protocol-init */
  440. u64 glbl_q_params_addr;
  441. u64 bdq_pbl_base_addr[2];
  442. /* The following parameters are used during HW-init
  443. * and these parameters need to be passed as arguments
  444. * to update_pf_params routine invoked before slowpath start
  445. */
  446. u16 num_cons;
  447. u16 num_tasks;
  448. /* The following parameters are used during protocol-init */
  449. u16 sq_num_pbl_pages;
  450. u16 cq_num_entries;
  451. u16 cmdq_num_entries;
  452. u16 rq_buffer_log_size;
  453. u16 mtu;
  454. u16 dummy_icid;
  455. u16 bdq_xoff_threshold[2];
  456. u16 bdq_xon_threshold[2];
  457. u16 rq_buffer_size;
  458. u8 num_cqs; /* num of global CQs */
  459. u8 log_page_size;
  460. u8 gl_rq_pi;
  461. u8 gl_cmd_pi;
  462. u8 debug_mode;
  463. u8 is_target;
  464. u8 bdq_pbl_num_entries[2];
  465. };
  466. /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
  467. struct qed_iscsi_pf_params {
  468. u64 glbl_q_params_addr;
  469. u64 bdq_pbl_base_addr[3];
  470. u16 cq_num_entries;
  471. u16 cmdq_num_entries;
  472. u32 two_msl_timer;
  473. u16 tx_sws_timer;
  474. /* The following parameters are used during HW-init
  475. * and these parameters need to be passed as arguments
  476. * to update_pf_params routine invoked before slowpath start
  477. */
  478. u16 num_cons;
  479. u16 num_tasks;
  480. /* The following parameters are used during protocol-init */
  481. u16 half_way_close_timeout;
  482. u16 bdq_xoff_threshold[3];
  483. u16 bdq_xon_threshold[3];
  484. u16 cmdq_xoff_threshold;
  485. u16 cmdq_xon_threshold;
  486. u16 rq_buffer_size;
  487. u8 num_sq_pages_in_ring;
  488. u8 num_r2tq_pages_in_ring;
  489. u8 num_uhq_pages_in_ring;
  490. u8 num_queues;
  491. u8 log_page_size;
  492. u8 rqe_log_size;
  493. u8 max_fin_rt;
  494. u8 gl_rq_pi;
  495. u8 gl_cmd_pi;
  496. u8 debug_mode;
  497. u8 ll2_ooo_queue_id;
  498. u8 is_target;
  499. u8 is_soc_en;
  500. u8 soc_num_of_blocks_log;
  501. u8 bdq_pbl_num_entries[3];
  502. };
  503. struct qed_nvmetcp_pf_params {
  504. u64 glbl_q_params_addr;
  505. u16 cq_num_entries;
  506. u16 num_cons;
  507. u16 num_tasks;
  508. u8 num_sq_pages_in_ring;
  509. u8 num_r2tq_pages_in_ring;
  510. u8 num_uhq_pages_in_ring;
  511. u8 num_queues;
  512. u8 gl_rq_pi;
  513. u8 gl_cmd_pi;
  514. u8 debug_mode;
  515. u8 ll2_ooo_queue_id;
  516. u16 min_rto;
  517. };
  518. struct qed_rdma_pf_params {
  519. /* Supplied to QED during resource allocation (may affect the ILT and
  520. * the doorbell BAR).
  521. */
  522. u32 min_dpis; /* number of requested DPIs */
  523. u32 num_qps; /* number of requested Queue Pairs */
  524. u32 num_srqs; /* number of requested SRQ */
  525. u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
  526. u8 gl_pi; /* protocol index */
  527. /* Will allocate rate limiters to be used with QPs */
  528. u8 enable_dcqcn;
  529. };
  530. struct qed_pf_params {
  531. struct qed_eth_pf_params eth_pf_params;
  532. struct qed_fcoe_pf_params fcoe_pf_params;
  533. struct qed_iscsi_pf_params iscsi_pf_params;
  534. struct qed_nvmetcp_pf_params nvmetcp_pf_params;
  535. struct qed_rdma_pf_params rdma_pf_params;
  536. };
  537. enum qed_int_mode {
  538. QED_INT_MODE_INTA,
  539. QED_INT_MODE_MSIX,
  540. QED_INT_MODE_MSI,
  541. QED_INT_MODE_POLL,
  542. };
  543. struct qed_sb_info {
  544. struct status_block *sb_virt;
  545. dma_addr_t sb_phys;
  546. u32 sb_ack; /* Last given ack */
  547. u16 igu_sb_id;
  548. void __iomem *igu_addr;
  549. u8 flags;
  550. #define QED_SB_INFO_INIT 0x1
  551. #define QED_SB_INFO_SETUP 0x2
  552. struct qed_dev *cdev;
  553. };
  554. enum qed_hw_err_type {
  555. QED_HW_ERR_FAN_FAIL,
  556. QED_HW_ERR_MFW_RESP_FAIL,
  557. QED_HW_ERR_HW_ATTN,
  558. QED_HW_ERR_DMAE_FAIL,
  559. QED_HW_ERR_RAMROD_FAIL,
  560. QED_HW_ERR_FW_ASSERT,
  561. QED_HW_ERR_LAST,
  562. };
  563. enum qed_dev_type {
  564. QED_DEV_TYPE_BB,
  565. QED_DEV_TYPE_AH,
  566. };
  567. struct qed_dev_info {
  568. unsigned long pci_mem_start;
  569. unsigned long pci_mem_end;
  570. unsigned int pci_irq;
  571. u8 num_hwfns;
  572. u8 hw_mac[ETH_ALEN];
  573. /* FW version */
  574. u16 fw_major;
  575. u16 fw_minor;
  576. u16 fw_rev;
  577. u16 fw_eng;
  578. /* MFW version */
  579. u32 mfw_rev;
  580. #define QED_MFW_VERSION_0_MASK 0x000000FF
  581. #define QED_MFW_VERSION_0_OFFSET 0
  582. #define QED_MFW_VERSION_1_MASK 0x0000FF00
  583. #define QED_MFW_VERSION_1_OFFSET 8
  584. #define QED_MFW_VERSION_2_MASK 0x00FF0000
  585. #define QED_MFW_VERSION_2_OFFSET 16
  586. #define QED_MFW_VERSION_3_MASK 0xFF000000
  587. #define QED_MFW_VERSION_3_OFFSET 24
  588. u32 flash_size;
  589. bool b_arfs_capable;
  590. bool b_inter_pf_switch;
  591. bool tx_switching;
  592. bool rdma_supported;
  593. u16 mtu;
  594. bool wol_support;
  595. bool smart_an;
  596. bool esl;
  597. /* MBI version */
  598. u32 mbi_version;
  599. #define QED_MBI_VERSION_0_MASK 0x000000FF
  600. #define QED_MBI_VERSION_0_OFFSET 0
  601. #define QED_MBI_VERSION_1_MASK 0x0000FF00
  602. #define QED_MBI_VERSION_1_OFFSET 8
  603. #define QED_MBI_VERSION_2_MASK 0x00FF0000
  604. #define QED_MBI_VERSION_2_OFFSET 16
  605. enum qed_dev_type dev_type;
  606. /* Output parameters for qede */
  607. bool vxlan_enable;
  608. bool gre_enable;
  609. bool geneve_enable;
  610. u8 abs_pf_id;
  611. };
  612. enum qed_sb_type {
  613. QED_SB_TYPE_L2_QUEUE,
  614. QED_SB_TYPE_CNQ,
  615. QED_SB_TYPE_STORAGE,
  616. };
  617. enum qed_protocol {
  618. QED_PROTOCOL_ETH,
  619. QED_PROTOCOL_ISCSI,
  620. QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI,
  621. QED_PROTOCOL_FCOE,
  622. };
  623. enum qed_fec_mode {
  624. QED_FEC_MODE_NONE = BIT(0),
  625. QED_FEC_MODE_FIRECODE = BIT(1),
  626. QED_FEC_MODE_RS = BIT(2),
  627. QED_FEC_MODE_AUTO = BIT(3),
  628. QED_FEC_MODE_UNSUPPORTED = BIT(4),
  629. };
  630. struct qed_link_params {
  631. bool link_up;
  632. u32 override_flags;
  633. #define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
  634. #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
  635. #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
  636. #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
  637. #define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
  638. #define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
  639. #define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)
  640. bool autoneg;
  641. __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
  642. u32 forced_speed;
  643. u32 pause_config;
  644. #define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
  645. #define QED_LINK_PAUSE_RX_ENABLE BIT(1)
  646. #define QED_LINK_PAUSE_TX_ENABLE BIT(2)
  647. u32 loopback_mode;
  648. #define QED_LINK_LOOPBACK_NONE BIT(0)
  649. #define QED_LINK_LOOPBACK_INT_PHY BIT(1)
  650. #define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
  651. #define QED_LINK_LOOPBACK_EXT BIT(3)
  652. #define QED_LINK_LOOPBACK_MAC BIT(4)
  653. #define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
  654. #define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
  655. #define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
  656. #define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
  657. #define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)
  658. struct qed_link_eee_params eee;
  659. u32 fec;
  660. };
  661. struct qed_link_output {
  662. bool link_up;
  663. __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
  664. __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
  665. __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);
  666. u32 speed; /* In Mb/s */
  667. u8 duplex; /* In DUPLEX defs */
  668. u8 port; /* In PORT defs */
  669. bool autoneg;
  670. u32 pause_config;
  671. /* EEE - capability & param */
  672. bool eee_supported;
  673. bool eee_active;
  674. u8 sup_caps;
  675. struct qed_link_eee_params eee;
  676. u32 sup_fec;
  677. u32 active_fec;
  678. };
  679. struct qed_probe_params {
  680. enum qed_protocol protocol;
  681. u32 dp_module;
  682. u8 dp_level;
  683. bool is_vf;
  684. bool recov_in_prog;
  685. };
  686. #define QED_DRV_VER_STR_SIZE 12
  687. struct qed_slowpath_params {
  688. u32 int_mode;
  689. u8 drv_major;
  690. u8 drv_minor;
  691. u8 drv_rev;
  692. u8 drv_eng;
  693. u8 name[QED_DRV_VER_STR_SIZE];
  694. };
  695. #define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
  696. struct qed_int_info {
  697. struct msix_entry *msix;
  698. u8 msix_cnt;
  699. /* This should be updated by the protocol driver */
  700. u8 used_cnt;
  701. };
  702. struct qed_generic_tlvs {
  703. #define QED_TLV_IP_CSUM BIT(0)
  704. #define QED_TLV_LSO BIT(1)
  705. u16 feat_flags;
  706. #define QED_TLV_MAC_COUNT 3
  707. u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
  708. };
  709. #define QED_I2C_DEV_ADDR_A0 0xA0
  710. #define QED_I2C_DEV_ADDR_A2 0xA2
  711. #define QED_NVM_SIGNATURE 0x12435687
  712. enum qed_nvm_flash_cmd {
  713. QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
  714. QED_NVM_FLASH_CMD_FILE_START = 0x3,
  715. QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
  716. QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5,
  717. QED_NVM_FLASH_CMD_NVM_MAX,
  718. };
  719. struct qed_devlink {
  720. struct qed_dev *cdev;
  721. struct devlink_health_reporter *fw_reporter;
  722. };
  723. struct qed_sb_info_dbg {
  724. u32 igu_prod;
  725. u32 igu_cons;
  726. u16 pi[PIS_PER_SB];
  727. };
  728. struct qed_common_cb_ops {
  729. void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
  730. void (*link_update)(void *dev, struct qed_link_output *link);
  731. void (*schedule_recovery_handler)(void *dev);
  732. void (*schedule_hw_err_handler)(void *dev,
  733. enum qed_hw_err_type err_type);
  734. void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
  735. void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
  736. void (*get_protocol_tlv_data)(void *dev, void *data);
  737. void (*bw_update)(void *dev);
  738. };
  739. struct qed_selftest_ops {
  740. /**
  741. * selftest_interrupt(): Perform interrupt test.
  742. *
  743. * @cdev: Qed dev pointer.
  744. *
  745. * Return: 0 on success, error otherwise.
  746. */
  747. int (*selftest_interrupt)(struct qed_dev *cdev);
  748. /**
  749. * selftest_memory(): Perform memory test.
  750. *
  751. * @cdev: Qed dev pointer.
  752. *
  753. * Return: 0 on success, error otherwise.
  754. */
  755. int (*selftest_memory)(struct qed_dev *cdev);
  756. /**
  757. * selftest_register(): Perform register test.
  758. *
  759. * @cdev: Qed dev pointer.
  760. *
  761. * Return: 0 on success, error otherwise.
  762. */
  763. int (*selftest_register)(struct qed_dev *cdev);
  764. /**
  765. * selftest_clock(): Perform clock test.
  766. *
  767. * @cdev: Qed dev pointer.
  768. *
  769. * Return: 0 on success, error otherwise.
  770. */
  771. int (*selftest_clock)(struct qed_dev *cdev);
  772. /**
  773. * selftest_nvram(): Perform nvram test.
  774. *
  775. * @cdev: Qed dev pointer.
  776. *
  777. * Return: 0 on success, error otherwise.
  778. */
  779. int (*selftest_nvram) (struct qed_dev *cdev);
  780. };
  781. struct qed_common_ops {
  782. struct qed_selftest_ops *selftest;
  783. struct qed_dev* (*probe)(struct pci_dev *dev,
  784. struct qed_probe_params *params);
  785. void (*remove)(struct qed_dev *cdev);
  786. int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);
  787. void (*set_name) (struct qed_dev *cdev, char name[]);
  788. /* Client drivers need to make this call before slowpath_start.
  789. * PF params required for the call before slowpath_start is
  790. * documented within the qed_pf_params structure definition.
  791. */
  792. void (*update_pf_params)(struct qed_dev *cdev,
  793. struct qed_pf_params *params);
  794. int (*slowpath_start)(struct qed_dev *cdev,
  795. struct qed_slowpath_params *params);
  796. int (*slowpath_stop)(struct qed_dev *cdev);
  797. /* Requests to use `cnt' interrupts for fastpath.
  798. * upon success, returns number of interrupts allocated for fastpath.
  799. */
  800. int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);
  801. /* Fills `info' with pointers required for utilizing interrupts */
  802. int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);
  803. u32 (*sb_init)(struct qed_dev *cdev,
  804. struct qed_sb_info *sb_info,
  805. void *sb_virt_addr,
  806. dma_addr_t sb_phy_addr,
  807. u16 sb_id,
  808. enum qed_sb_type type);
  809. u32 (*sb_release)(struct qed_dev *cdev,
  810. struct qed_sb_info *sb_info,
  811. u16 sb_id,
  812. enum qed_sb_type type);
  813. void (*simd_handler_config)(struct qed_dev *cdev,
  814. void *token,
  815. int index,
  816. void (*handler)(void *));
  817. void (*simd_handler_clean)(struct qed_dev *cdev, int index);
  818. int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
  819. int (*dbg_grc_size)(struct qed_dev *cdev);
  820. int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);
  821. int (*dbg_all_data_size)(struct qed_dev *cdev);
  822. int (*report_fatal_error)(struct devlink *devlink,
  823. enum qed_hw_err_type err_type);
  824. /**
  825. * can_link_change(): can the instance change the link or not.
  826. *
  827. * @cdev: Qed dev pointer.
  828. *
  829. * Return: true if link-change is allowed, false otherwise.
  830. */
  831. bool (*can_link_change)(struct qed_dev *cdev);
  832. /**
  833. * set_link(): set links according to params.
  834. *
  835. * @cdev: Qed dev pointer.
  836. * @params: values used to override the default link configuration.
  837. *
  838. * Return: 0 on success, error otherwise.
  839. */
  840. int (*set_link)(struct qed_dev *cdev,
  841. struct qed_link_params *params);
  842. /**
  843. * get_link(): returns the current link state.
  844. *
  845. * @cdev: Qed dev pointer.
  846. * @if_link: structure to be filled with current link configuration.
  847. *
  848. * Return: Void.
  849. */
  850. void (*get_link)(struct qed_dev *cdev,
  851. struct qed_link_output *if_link);
  852. /**
  853. * drain(): drains chip in case Tx completions fail to arrive due to pause.
  854. *
  855. * @cdev: Qed dev pointer.
  856. *
  857. * Return: Int.
  858. */
  859. int (*drain)(struct qed_dev *cdev);
  860. /**
  861. * update_msglvl(): update module debug level.
  862. *
  863. * @cdev: Qed dev pointer.
  864. * @dp_module: Debug module.
  865. * @dp_level: Debug level.
  866. *
  867. * Return: Void.
  868. */
  869. void (*update_msglvl)(struct qed_dev *cdev,
  870. u32 dp_module,
  871. u8 dp_level);
  872. int (*chain_alloc)(struct qed_dev *cdev,
  873. struct qed_chain *chain,
  874. struct qed_chain_init_params *params);
  875. void (*chain_free)(struct qed_dev *cdev,
  876. struct qed_chain *p_chain);
  877. /**
  878. * nvm_flash(): Flash nvm data.
  879. *
  880. * @cdev: Qed dev pointer.
  881. * @name: file containing the data.
  882. *
  883. * Return: 0 on success, error otherwise.
  884. */
  885. int (*nvm_flash)(struct qed_dev *cdev, const char *name);
  886. /**
  887. * nvm_get_image(): reads an entire image from nvram.
  888. *
  889. * @cdev: Qed dev pointer.
  890. * @type: type of the request nvram image.
  891. * @buf: preallocated buffer to fill with the image.
  892. * @len: length of the allocated buffer.
  893. *
  894. * Return: 0 on success, error otherwise.
  895. */
  896. int (*nvm_get_image)(struct qed_dev *cdev,
  897. enum qed_nvm_images type, u8 *buf, u16 len);
  898. /**
  899. * set_coalesce(): Configure Rx coalesce value in usec.
  900. *
  901. * @cdev: Qed dev pointer.
  902. * @rx_coal: Rx coalesce value in usec.
  903. * @tx_coal: Tx coalesce value in usec.
  904. * @handle: Handle.
  905. *
  906. * Return: 0 on success, error otherwise.
  907. */
  908. int (*set_coalesce)(struct qed_dev *cdev,
  909. u16 rx_coal, u16 tx_coal, void *handle);
  910. /**
  911. * set_led() - Configure LED mode.
  912. *
  913. * @cdev: Qed dev pointer.
  914. * @mode: LED mode.
  915. *
  916. * Return: 0 on success, error otherwise.
  917. */
  918. int (*set_led)(struct qed_dev *cdev,
  919. enum qed_led_mode mode);
  920. /**
  921. * attn_clr_enable(): Prevent attentions from being reasserted.
  922. *
  923. * @cdev: Qed dev pointer.
  924. * @clr_enable: Clear enable.
  925. *
  926. * Return: Void.
  927. */
  928. void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
  929. /**
  930. * db_recovery_add(): add doorbell information to the doorbell
  931. * recovery mechanism.
  932. *
  933. * @cdev: Qed dev pointer.
  934. * @db_addr: Doorbell address.
 * @db_data: Address of where db_data is stored.
  936. * @db_width: Doorbell is 32b or 64b.
  937. * @db_space: Doorbell recovery addresses are user or kernel space.
  938. *
  939. * Return: Int.
  940. */
  941. int (*db_recovery_add)(struct qed_dev *cdev,
  942. void __iomem *db_addr,
  943. void *db_data,
  944. enum qed_db_rec_width db_width,
  945. enum qed_db_rec_space db_space);
  946. /**
  947. * db_recovery_del(): remove doorbell information from the doorbell
  948. * recovery mechanism. db_data serves as key (db_addr is not unique).
  949. *
  950. * @cdev: Qed dev pointer.
  951. * @db_addr: Doorbell address.
  952. * @db_data: Address where db_data is stored. Serves as key for the
  953. * entry to delete.
  954. *
  955. * Return: Int.
  956. */
  957. int (*db_recovery_del)(struct qed_dev *cdev,
  958. void __iomem *db_addr, void *db_data);
  959. /**
  960. * recovery_process(): Trigger a recovery process.
  961. *
  962. * @cdev: Qed dev pointer.
  963. *
  964. * Return: 0 on success, error otherwise.
  965. */
  966. int (*recovery_process)(struct qed_dev *cdev);
  967. /**
  968. * recovery_prolog(): Execute the prolog operations of a recovery process.
  969. *
  970. * @cdev: Qed dev pointer.
  971. *
  972. * Return: 0 on success, error otherwise.
  973. */
  974. int (*recovery_prolog)(struct qed_dev *cdev);
  975. /**
  976. * update_drv_state(): API to inform the change in the driver state.
  977. *
  978. * @cdev: Qed dev pointer.
  979. * @active: Active
  980. *
  981. * Return: Int.
  982. */
  983. int (*update_drv_state)(struct qed_dev *cdev, bool active);
  984. /**
  985. * update_mac(): API to inform the change in the mac address.
  986. *
  987. * @cdev: Qed dev pointer.
  988. * @mac: MAC.
  989. *
  990. * Return: Int.
  991. */
  992. int (*update_mac)(struct qed_dev *cdev, const u8 *mac);
  993. /**
  994. * update_mtu(): API to inform the change in the mtu.
  995. *
  996. * @cdev: Qed dev pointer.
  997. * @mtu: MTU.
  998. *
  999. * Return: Int.
  1000. */
  1001. int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
  1002. /**
  1003. * update_wol(): Update of changes in the WoL configuration.
  1004. *
  1005. * @cdev: Qed dev pointer.
  1006. * @enabled: true iff WoL should be enabled.
  1007. *
  1008. * Return: Int.
  1009. */
  1010. int (*update_wol) (struct qed_dev *cdev, bool enabled);
  1011. /**
  1012. * read_module_eeprom(): Read EEPROM.
  1013. *
  1014. * @cdev: Qed dev pointer.
  1015. * @buf: buffer.
  1016. * @dev_addr: PHY device memory region.
  1017. * @offset: offset into eeprom contents to be read.
  1018. * @len: buffer length, i.e., max bytes to be read.
  1019. *
  1020. * Return: Int.
  1021. */
  1022. int (*read_module_eeprom)(struct qed_dev *cdev,
  1023. char *buf, u8 dev_addr, u32 offset, u32 len);
  1024. /**
  1025. * get_affin_hwfn_idx(): Get affine HW function.
  1026. *
  1027. * @cdev: Qed dev pointer.
  1028. *
  1029. * Return: u8.
  1030. */
  1031. u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
  1032. /**
  1033. * read_nvm_cfg(): Read NVM config attribute value.
  1034. *
  1035. * @cdev: Qed dev pointer.
  1036. * @buf: Buffer.
  1037. * @cmd: NVM CFG command id.
  1038. * @entity_id: Entity id.
  1039. *
  1040. * Return: Int.
  1041. */
  1042. int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
  1043. u32 entity_id);
  1044. /**
  1045. * read_nvm_cfg_len(): Read NVM config attribute value.
  1046. *
  1047. * @cdev: Qed dev pointer.
  1048. * @cmd: NVM CFG command id.
  1049. *
  1050. * Return: config id length, 0 on error.
  1051. */
  1052. int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
  1053. /**
  1054. * set_grc_config(): Configure value for grc config id.
  1055. *
  1056. * @cdev: Qed dev pointer.
  1057. * @cfg_id: grc config id
  1058. * @val: grc config value
  1059. *
  1060. * Return: Int.
  1061. */
  1062. int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
  1063. struct devlink* (*devlink_register)(struct qed_dev *cdev);
  1064. void (*devlink_unregister)(struct devlink *devlink);
  1065. __printf(2, 3) void (*mfw_report)(struct qed_dev *cdev, char *fmt, ...);
  1066. int (*get_sb_info)(struct qed_dev *cdev, struct qed_sb_info *sb,
  1067. u16 qid, struct qed_sb_info_dbg *sb_dbg);
  1068. int (*get_esl_status)(struct qed_dev *cdev, bool *esl_active);
  1069. };
  1070. #define MASK_FIELD(_name, _value) \
  1071. ((_value) &= (_name ## _MASK))
  1072. #define FIELD_VALUE(_name, _value) \
  1073. ((_value & _name ## _MASK) << _name ## _SHIFT)
  1074. #define SET_FIELD(value, name, flag) \
  1075. do { \
  1076. (value) &= ~(name ## _MASK << name ## _SHIFT); \
  1077. (value) |= (((u64)flag) << (name ## _SHIFT)); \
  1078. } while (0)
  1079. #define GET_FIELD(value, name) \
  1080. (((value) >> (name ## _SHIFT)) & name ## _MASK)
  1081. #define GET_MFW_FIELD(name, field) \
  1082. (((name) & (field ## _MASK)) >> (field ## _OFFSET))
  1083. #define SET_MFW_FIELD(name, field, value) \
  1084. do { \
  1085. (name) &= ~(field ## _MASK); \
  1086. (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
  1087. } while (0)
  1088. #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
  1089. /* Debug print definitions */
  1090. #define DP_ERR(cdev, fmt, ...) \
  1091. do { \
  1092. pr_err("[%s:%d(%s)]" fmt, \
  1093. __func__, __LINE__, \
  1094. DP_NAME(cdev) ? DP_NAME(cdev) : "", \
  1095. ## __VA_ARGS__); \
  1096. } while (0)
  1097. #define DP_NOTICE(cdev, fmt, ...) \
  1098. do { \
  1099. if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
  1100. pr_notice("[%s:%d(%s)]" fmt, \
  1101. __func__, __LINE__, \
  1102. DP_NAME(cdev) ? DP_NAME(cdev) : "", \
  1103. ## __VA_ARGS__); \
  1104. \
  1105. } \
  1106. } while (0)
  1107. #define DP_INFO(cdev, fmt, ...) \
  1108. do { \
  1109. if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \
  1110. pr_notice("[%s:%d(%s)]" fmt, \
  1111. __func__, __LINE__, \
  1112. DP_NAME(cdev) ? DP_NAME(cdev) : "", \
  1113. ## __VA_ARGS__); \
  1114. } \
  1115. } while (0)
  1116. #define DP_VERBOSE(cdev, module, fmt, ...) \
  1117. do { \
  1118. if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
  1119. ((cdev)->dp_module & module))) { \
  1120. pr_notice("[%s:%d(%s)]" fmt, \
  1121. __func__, __LINE__, \
  1122. DP_NAME(cdev) ? DP_NAME(cdev) : "", \
  1123. ## __VA_ARGS__); \
  1124. } \
  1125. } while (0)
/* Message severity levels for the DP_* print macros: a message is
 * emitted when its level is >= the device's configured dp_level
 * (lower value == more verbose).
 */
enum DP_LEVEL {
	QED_LEVEL_VERBOSE = 0x0,
	QED_LEVEL_INFO = 0x1,
	QED_LEVEL_NOTICE = 0x2,
	QED_LEVEL_ERR = 0x3,
};

/* Layout of the combined 32-bit debug word: the top two bits select
 * NOTICE/INFO, the low 30 bits carry per-module verbose flags.
 */
#define QED_LOG_LEVEL_SHIFT (30)
#define QED_LOG_VERBOSE_MASK (0x3fffffff)
#define QED_LOG_INFO_MASK (0x40000000)
#define QED_LOG_NOTICE_MASK (0x80000000)
/* Per-module verbosity bits tested by DP_VERBOSE() against
 * cdev->dp_module. Values below 0x10000 are presumably reserved for
 * protocol-driver modules defined elsewhere -- TODO confirm against the
 * rest of the file.
 */
enum DP_MODULE {
	QED_MSG_SPQ = 0x10000,
	QED_MSG_STATS = 0x20000,
	QED_MSG_DCB = 0x40000,
	QED_MSG_IOV = 0x80000,
	QED_MSG_SP = 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT = 0x800000,
	QED_MSG_LL2 = 0x1000000,
	QED_MSG_ILT = 0x2000000,
	QED_MSG_RDMA = 0x4000000,
	QED_MSG_DEBUG = 0x8000000,
	/* To be added... 0x400000 and bits above 0x8000000 are still free
	 * (must stay below QED_LOG_VERBOSE_MASK).
	 */
};
/* Multi-function operating mode of the device: default (single
 * function), outer-VLAN based partitioning, or NPAR partitioning.
 */
enum qed_mf_mode {
	QED_MF_DEFAULT,
	QED_MF_OVLAN,
	QED_MF_NPAR,
};
/* L2 statistics counters common to all chip variants. All counters are
 * 64-bit; chip-specific extras live in qed_eth_stats_bb/qed_eth_stats_ah.
 */
struct qed_eth_stats_common {
	/* Rx datapath discards and per-cast traffic counters */
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 gft_filter_drop;
	/* Tx per-cast traffic counters */
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	/* TPA (HW LRO) aggregation counters */
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	/* Rx size histogram and MAC-level error counters */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	/* Tx size histogram and flow-control counters */
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	/* Buffer-block truncates/discards and MAC aggregate counters */
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};
/* Statistics that exist only on BB-family devices: extended jumbo-frame
 * size histograms plus LPI and collision counters.
 */
struct qed_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};
/* Statistics that exist only on AH-family devices: a single bucket for
 * every frame larger than 1518 bytes.
 */
struct qed_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};
/* Complete device statistics: the common block plus a union of the
 * chip-specific extensions; only the member matching the device family
 * (BB or AH) is meaningful.
 */
struct qed_eth_stats {
	struct qed_eth_stats_common common;

	union {
		struct qed_eth_stats_bb bb;
		struct qed_eth_stats_ah ah;
	};
};
  1243. #define QED_SB_IDX 0x0002
  1244. #define RX_PI 0
  1245. #define TX_PI(tc) (RX_PI + 1 + tc)
/* Status-block accounting for a PF and the VFs beneath it. */
struct qed_sb_cnt_info {
	/* Original, current, and free SBs for PF */
	int orig;
	int cnt;
	int free_cnt;

	/* Original, current and free SBS for child VFs */
	int iov_orig;
	int iov_cnt;
	int free_cnt_iov;
};
  1256. static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
  1257. {
  1258. u32 prod = 0;
  1259. u16 rc = 0;
  1260. prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
  1261. STATUS_BLOCK_PROD_INDEX_MASK;
  1262. if (sb_info->sb_ack != prod) {
  1263. sb_info->sb_ack = prod;
  1264. rc |= QED_SB_IDX;
  1265. }
  1266. /* Let SB update */
  1267. return rc;
  1268. }
/**
 * qed_sb_ack(): This function creates an update command for interrupts
 *               that is written to the IGU.
 *
 * @sb_info: This is the structure allocated and
 *           initialized per status block. Assumption is
 *           that it was initialized using qed_sb_init
 * @int_cmd: Enable/Disable/Nop
 * @upd_flg: Whether igu consumer should be updated.
 *
 * Return: Void.
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
			      enum igu_int_cmd int_cmd,
			      u8 upd_flg)
{
	u32 igu_ack;

	/* Pack the latched ack index, the update flag, the interrupt
	 * enable command and the register-access segment into a single
	 * 32-bit IGU producer/consumer update command.
	 */
	igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		   (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		   (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		   (IGU_SEG_ACCESS_REG <<
		    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(sb_info->igu_addr, igu_ack);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	barrier();
}
  1297. static inline void __internal_ram_wr(void *p_hwfn,
  1298. void __iomem *addr,
  1299. int size,
  1300. u32 *data)
  1301. {
  1302. unsigned int i;
  1303. for (i = 0; i < size / sizeof(*data); i++)
  1304. DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
  1305. }
/* Convenience wrapper around __internal_ram_wr() for callers without a
 * hwfn context: writes @size bytes from @data to internal RAM at @addr.
 */
static inline void internal_ram_wr(void __iomem *addr,
				   int size,
				   u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}
/* RSS hash-type capability flags (bitmask): which header fields may be
 * fed into the receive-side-scaling hash.
 */
enum qed_rss_caps {
	QED_RSS_IPV4 = 0x1,
	QED_RSS_IPV6 = 0x2,
	QED_RSS_IPV4_TCP = 0x4,
	QED_RSS_IPV6_TCP = 0x8,
	QED_RSS_IPV4_UDP = 0x10,
	QED_RSS_IPV6_UDP = 0x20,
};
/* Number of entries in the RSS indirection table. */
#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
  1322. #endif