/* lpfc_sli4.h */
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/cpufreq.h>
#include <linux/irq_poll.h>
  24. #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
  25. #define CONFIG_SCSI_LPFC_DEBUG_FS
  26. #endif
  27. #define LPFC_ACTIVE_MBOX_WAIT_CNT 100
  28. #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
  29. #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
  30. #define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
  31. #define LPFC_RPI_LOW_WATER_MARK 10
  32. #define LPFC_UNREG_FCF 1
  33. #define LPFC_SKIP_UNREG_FCF 0
  34. /* Amount of time in seconds for waiting FCF rediscovery to complete */
  35. #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
  36. /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
  37. #define LPFC_NEMBED_MBOX_SGL_CNT 254
  38. /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
  39. #define LPFC_HBA_HDWQ_MIN 0
  40. #define LPFC_HBA_HDWQ_MAX 256
  41. #define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN
  42. /* irq_chann range, values */
  43. #define LPFC_IRQ_CHANN_MIN 0
  44. #define LPFC_IRQ_CHANN_MAX 256
  45. #define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN
  46. /* FCP MQ queue count limiting */
  47. #define LPFC_FCP_MQ_THRESHOLD_MIN 0
  48. #define LPFC_FCP_MQ_THRESHOLD_MAX 256
  49. #define LPFC_FCP_MQ_THRESHOLD_DEF 8
  50. /*
  51. * Provide the default FCF Record attributes used by the driver
  52. * when nonFIP mode is configured and there is no other default
  53. * FCF Record attributes.
  54. */
  55. #define LPFC_FCOE_FCF_DEF_INDEX 0
  56. #define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
  57. #define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
  58. #define LPFC_FCOE_NULL_VID 0xFFF
  59. #define LPFC_FCOE_IGNORE_VID 0xFFFF
  60. /* First 3 bytes of default FCF MAC is specified by FC_MAP */
  61. #define LPFC_FCOE_FCF_MAC3 0xFF
  62. #define LPFC_FCOE_FCF_MAC4 0xFF
  63. #define LPFC_FCOE_FCF_MAC5 0xFE
  64. #define LPFC_FCOE_FCF_MAP0 0x0E
  65. #define LPFC_FCOE_FCF_MAP1 0xFC
  66. #define LPFC_FCOE_FCF_MAP2 0x00
  67. #define LPFC_FCOE_MAX_RCV_SIZE 0x800
  68. #define LPFC_FCOE_FKA_ADV_PER 0
  69. #define LPFC_FCOE_FIP_PRIORITY 0x80
  70. #define sli4_sid_from_fc_hdr(fc_hdr) \
  71. ((fc_hdr)->fh_s_id[0] << 16 | \
  72. (fc_hdr)->fh_s_id[1] << 8 | \
  73. (fc_hdr)->fh_s_id[2])
  74. #define sli4_did_from_fc_hdr(fc_hdr) \
  75. ((fc_hdr)->fh_d_id[0] << 16 | \
  76. (fc_hdr)->fh_d_id[1] << 8 | \
  77. (fc_hdr)->fh_d_id[2])
  78. #define sli4_fctl_from_fc_hdr(fc_hdr) \
  79. ((fc_hdr)->fh_f_ctl[0] << 16 | \
  80. (fc_hdr)->fh_f_ctl[1] << 8 | \
  81. (fc_hdr)->fh_f_ctl[2])
  82. #define sli4_type_from_fc_hdr(fc_hdr) \
  83. ((fc_hdr)->fh_type)
  84. #define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
  85. #define INT_FW_UPGRADE 0
  86. #define RUN_FW_UPGRADE 1
  87. enum lpfc_sli4_queue_type {
  88. LPFC_EQ,
  89. LPFC_GCQ,
  90. LPFC_MCQ,
  91. LPFC_WCQ,
  92. LPFC_RCQ,
  93. LPFC_MQ,
  94. LPFC_WQ,
  95. LPFC_HRQ,
  96. LPFC_DRQ
  97. };
  98. /* The queue sub-type defines the functional purpose of the queue */
  99. enum lpfc_sli4_queue_subtype {
  100. LPFC_NONE,
  101. LPFC_MBOX,
  102. LPFC_IO,
  103. LPFC_ELS,
  104. LPFC_NVMET,
  105. LPFC_NVME_LS,
  106. LPFC_USOL
  107. };
  108. /* RQ buffer list */
  109. struct lpfc_rqb {
  110. uint16_t entry_count; /* Current number of RQ slots */
  111. uint16_t buffer_count; /* Current number of buffers posted */
  112. struct list_head rqb_buffer_list; /* buffers assigned to this HBQ */
  113. /* Callback for HBQ buffer allocation */
  114. struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *);
  115. /* Callback for HBQ buffer free */
  116. void (*rqb_free_buffer)(struct lpfc_hba *,
  117. struct rqb_dmabuf *);
  118. };
  119. enum lpfc_poll_mode {
  120. LPFC_QUEUE_WORK,
  121. LPFC_IRQ_POLL
  122. };
  123. struct lpfc_idle_stat {
  124. u64 prev_idle;
  125. u64 prev_wall;
  126. };
  127. struct lpfc_queue {
  128. struct list_head list;
  129. struct list_head wq_list;
  130. /*
  131. * If interrupts are in effect on _all_ the eq's the footprint
  132. * of polling code is zero (except mode). This memory is chec-
  133. * ked for every io to see if the io needs to be polled and
  134. * while completion to check if the eq's needs to be rearmed.
  135. * Keep in same cacheline as the queue ptr to avoid cpu fetch
  136. * stalls. Using 1B memory will leave us with 7B hole. Fill
  137. * it with other frequently used members.
  138. */
  139. uint16_t last_cpu; /* most recent cpu */
  140. uint16_t hdwq;
  141. uint8_t qe_valid;
  142. uint8_t mode; /* interrupt or polling */
  143. #define LPFC_EQ_INTERRUPT 0
  144. #define LPFC_EQ_POLL 1
  145. struct list_head wqfull_list;
  146. enum lpfc_sli4_queue_type type;
  147. enum lpfc_sli4_queue_subtype subtype;
  148. struct lpfc_hba *phba;
  149. struct list_head child_list;
  150. struct list_head page_list;
  151. struct list_head sgl_list;
  152. struct list_head cpu_list;
  153. uint32_t entry_count; /* Number of entries to support on the queue */
  154. uint32_t entry_size; /* Size of each queue entry. */
  155. uint32_t entry_cnt_per_pg;
  156. uint32_t notify_interval; /* Queue Notification Interval
  157. * For chip->host queues (EQ, CQ, RQ):
  158. * specifies the interval (number of
  159. * entries) where the doorbell is rung to
  160. * notify the chip of entry consumption.
  161. * For host->chip queues (WQ):
  162. * specifies the interval (number of
  163. * entries) where consumption CQE is
  164. * requested to indicate WQ entries
  165. * consumed by the chip.
  166. * Not used on an MQ.
  167. */
  168. #define LPFC_EQ_NOTIFY_INTRVL 16
  169. #define LPFC_CQ_NOTIFY_INTRVL 16
  170. #define LPFC_WQ_NOTIFY_INTRVL 16
  171. #define LPFC_RQ_NOTIFY_INTRVL 16
  172. uint32_t max_proc_limit; /* Queue Processing Limit
  173. * For chip->host queues (EQ, CQ):
  174. * specifies the maximum number of
  175. * entries to be consumed in one
  176. * processing iteration sequence. Queue
  177. * will be rearmed after each iteration.
  178. * Not used on an MQ, RQ or WQ.
  179. */
  180. #define LPFC_EQ_MAX_PROC_LIMIT 256
  181. #define LPFC_CQ_MIN_PROC_LIMIT 64
  182. #define LPFC_CQ_MAX_PROC_LIMIT LPFC_CQE_EXP_COUNT // 4096
  183. #define LPFC_CQ_DEF_MAX_PROC_LIMIT LPFC_CQE_DEF_COUNT // 1024
  184. #define LPFC_CQ_MIN_THRESHOLD_TO_POLL 64
  185. #define LPFC_CQ_MAX_THRESHOLD_TO_POLL LPFC_CQ_DEF_MAX_PROC_LIMIT
  186. #define LPFC_CQ_DEF_THRESHOLD_TO_POLL LPFC_CQ_DEF_MAX_PROC_LIMIT
  187. uint32_t queue_claimed; /* indicates queue is being processed */
  188. uint32_t queue_id; /* Queue ID assigned by the hardware */
  189. uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
  190. uint32_t host_index; /* The host's index for putting or getting */
  191. uint32_t hba_index; /* The last known hba index for get or put */
  192. uint32_t q_mode;
  193. struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
  194. struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
  195. uint16_t page_count; /* Number of pages allocated for this queue */
  196. uint16_t page_size; /* size of page allocated for this queue */
  197. #define LPFC_EXPANDED_PAGE_SIZE 16384
  198. #define LPFC_DEFAULT_PAGE_SIZE 4096
  199. uint16_t chann; /* Hardware Queue association WQ/CQ */
  200. /* CPU affinity for EQ */
  201. #define LPFC_FIND_BY_EQ 0
  202. #define LPFC_FIND_BY_HDWQ 1
  203. uint8_t db_format;
  204. #define LPFC_DB_RING_FORMAT 0x01
  205. #define LPFC_DB_LIST_FORMAT 0x02
  206. uint8_t q_flag;
  207. #define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */
  208. #define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
  209. #define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */
  210. #define LPFC_NVMET_CQ_NOTIFY 4
  211. void __iomem *db_regaddr;
  212. uint16_t dpp_enable;
  213. uint16_t dpp_id;
  214. void __iomem *dpp_regaddr;
  215. /* For q stats */
  216. uint32_t q_cnt_1;
  217. uint32_t q_cnt_2;
  218. uint32_t q_cnt_3;
  219. uint64_t q_cnt_4;
  220. /* defines for EQ stats */
  221. #define EQ_max_eqe q_cnt_1
  222. #define EQ_no_entry q_cnt_2
  223. #define EQ_cqe_cnt q_cnt_3
  224. #define EQ_processed q_cnt_4
  225. /* defines for CQ stats */
  226. #define CQ_mbox q_cnt_1
  227. #define CQ_max_cqe q_cnt_1
  228. #define CQ_release_wqe q_cnt_2
  229. #define CQ_xri_aborted q_cnt_3
  230. #define CQ_wq q_cnt_4
  231. /* defines for WQ stats */
  232. #define WQ_overflow q_cnt_1
  233. #define WQ_posted q_cnt_4
  234. /* defines for RQ stats */
  235. #define RQ_no_posted_buf q_cnt_1
  236. #define RQ_no_buf_found q_cnt_2
  237. #define RQ_buf_posted q_cnt_3
  238. #define RQ_rcv_buf q_cnt_4
  239. struct work_struct irqwork;
  240. struct work_struct spwork;
  241. struct delayed_work sched_irqwork;
  242. struct delayed_work sched_spwork;
  243. uint64_t isr_timestamp;
  244. struct lpfc_queue *assoc_qp;
  245. struct list_head _poll_list;
  246. void **q_pgs; /* array to index entries per page */
  247. #define LPFC_IRQ_POLL_WEIGHT 256
  248. struct irq_poll iop;
  249. enum lpfc_poll_mode poll_mode;
  250. };
  251. struct lpfc_sli4_link {
  252. uint32_t speed;
  253. uint8_t duplex;
  254. uint8_t status;
  255. uint8_t type;
  256. uint8_t number;
  257. uint8_t fault;
  258. uint32_t logical_speed;
  259. uint16_t topology;
  260. };
  261. struct lpfc_fcf_rec {
  262. uint8_t fabric_name[8];
  263. uint8_t switch_name[8];
  264. uint8_t mac_addr[6];
  265. uint16_t fcf_indx;
  266. uint32_t priority;
  267. uint16_t vlan_id;
  268. uint32_t addr_mode;
  269. uint32_t flag;
  270. #define BOOT_ENABLE 0x01
  271. #define RECORD_VALID 0x02
  272. };
  273. struct lpfc_fcf_pri_rec {
  274. uint16_t fcf_index;
  275. #define LPFC_FCF_ON_PRI_LIST 0x0001
  276. #define LPFC_FCF_FLOGI_FAILED 0x0002
  277. uint16_t flag;
  278. uint32_t priority;
  279. };
  280. struct lpfc_fcf_pri {
  281. struct list_head list;
  282. struct lpfc_fcf_pri_rec fcf_rec;
  283. };
  284. /*
  285. * Maximum FCF table index, it is for driver internal book keeping, it
  286. * just needs to be no less than the supported HBA's FCF table size.
  287. */
  288. #define LPFC_SLI4_FCF_TBL_INDX_MAX 32
  289. struct lpfc_fcf {
  290. uint16_t fcfi;
  291. uint32_t fcf_flag;
  292. #define FCF_AVAILABLE 0x01 /* FCF available for discovery */
  293. #define FCF_REGISTERED 0x02 /* FCF registered with FW */
  294. #define FCF_SCAN_DONE 0x04 /* FCF table scan done */
  295. #define FCF_IN_USE 0x08 /* Atleast one discovery completed */
  296. #define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
  297. #define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
  298. #define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
  299. #define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
  300. #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
  301. #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
  302. #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
  303. #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
  304. uint16_t fcf_redisc_attempted;
  305. uint32_t addr_mode;
  306. uint32_t eligible_fcf_cnt;
  307. struct lpfc_fcf_rec current_rec;
  308. struct lpfc_fcf_rec failover_rec;
  309. struct list_head fcf_pri_list;
  310. struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
  311. uint32_t current_fcf_scan_pri;
  312. struct timer_list redisc_wait;
  313. unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
  314. };
  315. #define LPFC_REGION23_SIGNATURE "RG23"
  316. #define LPFC_REGION23_VERSION 1
  317. #define LPFC_REGION23_LAST_REC 0xff
  318. #define DRIVER_SPECIFIC_TYPE 0xA2
  319. #define LINUX_DRIVER_ID 0x20
  320. #define PORT_STE_TYPE 0x1
  321. struct lpfc_fip_param_hdr {
  322. uint8_t type;
  323. #define FCOE_PARAM_TYPE 0xA0
  324. uint8_t length;
  325. #define FCOE_PARAM_LENGTH 2
  326. uint8_t parm_version;
  327. #define FIPP_VERSION 0x01
  328. uint8_t parm_flags;
  329. #define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
  330. #define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
  331. #define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
  332. #define FIPP_MODE_ON 0x1
  333. #define FIPP_MODE_OFF 0x0
  334. #define FIPP_VLAN_VALID 0x1
  335. };
  336. struct lpfc_fcoe_params {
  337. uint8_t fc_map[3];
  338. uint8_t reserved1;
  339. uint16_t vlan_tag;
  340. uint8_t reserved[2];
  341. };
  342. struct lpfc_fcf_conn_hdr {
  343. uint8_t type;
  344. #define FCOE_CONN_TBL_TYPE 0xA1
  345. uint8_t length; /* words */
  346. uint8_t reserved[2];
  347. };
  348. struct lpfc_fcf_conn_rec {
  349. uint16_t flags;
  350. #define FCFCNCT_VALID 0x0001
  351. #define FCFCNCT_BOOT 0x0002
  352. #define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
  353. #define FCFCNCT_FBNM_VALID 0x0008
  354. #define FCFCNCT_SWNM_VALID 0x0010
  355. #define FCFCNCT_VLAN_VALID 0x0020
  356. #define FCFCNCT_AM_VALID 0x0040
  357. #define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
  358. #define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
  359. uint16_t vlan_tag;
  360. uint8_t fabric_name[8];
  361. uint8_t switch_name[8];
  362. };
  363. struct lpfc_fcf_conn_entry {
  364. struct list_head list;
  365. struct lpfc_fcf_conn_rec conn_rec;
  366. };
  367. /*
  368. * Define the host's bootstrap mailbox. This structure contains
  369. * the member attributes needed to create, use, and destroy the
  370. * bootstrap mailbox region.
  371. *
  372. * The macro definitions for the bmbx data structure are defined
  373. * in lpfc_hw4.h with the register definition.
  374. */
  375. struct lpfc_bmbx {
  376. struct lpfc_dmabuf *dmabuf;
  377. struct dma_address dma_address;
  378. void *avirt;
  379. dma_addr_t aphys;
  380. uint32_t bmbx_size;
  381. };
  382. #define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
  383. #define LPFC_EQE_SIZE_4B 4
  384. #define LPFC_EQE_SIZE_16B 16
  385. #define LPFC_CQE_SIZE 16
  386. #define LPFC_WQE_SIZE 64
  387. #define LPFC_WQE128_SIZE 128
  388. #define LPFC_MQE_SIZE 256
  389. #define LPFC_RQE_SIZE 8
  390. #define LPFC_EQE_DEF_COUNT 1024
  391. #define LPFC_CQE_DEF_COUNT 1024
  392. #define LPFC_CQE_EXP_COUNT 4096
  393. #define LPFC_WQE_DEF_COUNT 256
  394. #define LPFC_WQE_EXP_COUNT 1024
  395. #define LPFC_MQE_DEF_COUNT 16
  396. #define LPFC_RQE_DEF_COUNT 512
  397. #define LPFC_QUEUE_NOARM false
  398. #define LPFC_QUEUE_REARM true
  399. /*
  400. * SLI4 CT field defines
  401. */
  402. #define SLI4_CT_RPI 0
  403. #define SLI4_CT_VPI 1
  404. #define SLI4_CT_VFI 2
  405. #define SLI4_CT_FCFI 3
  406. /*
  407. * SLI4 specific data structures
  408. */
  409. struct lpfc_max_cfg_param {
  410. uint16_t max_xri;
  411. uint16_t xri_base;
  412. uint16_t xri_used;
  413. uint16_t max_rpi;
  414. uint16_t rpi_base;
  415. uint16_t rpi_used;
  416. uint16_t max_vpi;
  417. uint16_t vpi_base;
  418. uint16_t vpi_used;
  419. uint16_t max_vfi;
  420. uint16_t vfi_base;
  421. uint16_t vfi_used;
  422. uint16_t max_fcfi;
  423. uint16_t fcfi_used;
  424. uint16_t max_eq;
  425. uint16_t max_rq;
  426. uint16_t max_cq;
  427. uint16_t max_wq;
  428. };
  429. struct lpfc_hba;
  430. /* SLI4 HBA multi-fcp queue handler struct */
  431. #define LPFC_SLI4_HANDLER_NAME_SZ 16
  432. struct lpfc_hba_eq_hdl {
  433. uint32_t idx;
  434. int irq;
  435. char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
  436. struct lpfc_hba *phba;
  437. struct lpfc_queue *eq;
  438. struct cpumask aff_mask;
  439. };
  440. #define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
  441. #define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
  442. #define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)
  443. /*BB Credit recovery value*/
  444. struct lpfc_bbscn_params {
  445. uint32_t word0;
  446. #define lpfc_bbscn_min_SHIFT 0
  447. #define lpfc_bbscn_min_MASK 0x0000000F
  448. #define lpfc_bbscn_min_WORD word0
  449. #define lpfc_bbscn_max_SHIFT 4
  450. #define lpfc_bbscn_max_MASK 0x0000000F
  451. #define lpfc_bbscn_max_WORD word0
  452. #define lpfc_bbscn_def_SHIFT 8
  453. #define lpfc_bbscn_def_MASK 0x0000000F
  454. #define lpfc_bbscn_def_WORD word0
  455. };
  456. /* Port Capabilities for SLI4 Parameters */
  457. struct lpfc_pc_sli4_params {
  458. uint32_t supported;
  459. uint32_t if_type;
  460. uint32_t sli_rev;
  461. uint32_t sli_family;
  462. uint32_t featurelevel_1;
  463. uint32_t featurelevel_2;
  464. uint32_t proto_types;
  465. #define LPFC_SLI4_PROTO_FCOE 0x0000001
  466. #define LPFC_SLI4_PROTO_FC 0x0000002
  467. #define LPFC_SLI4_PROTO_NIC 0x0000004
  468. #define LPFC_SLI4_PROTO_ISCSI 0x0000008
  469. #define LPFC_SLI4_PROTO_RDMA 0x0000010
  470. uint32_t sge_supp_len;
  471. uint32_t if_page_sz;
  472. uint32_t rq_db_window;
  473. uint32_t loopbk_scope;
  474. uint32_t oas_supported;
  475. uint32_t eq_pages_max;
  476. uint32_t eqe_size;
  477. uint32_t cq_pages_max;
  478. uint32_t cqe_size;
  479. uint32_t mq_pages_max;
  480. uint32_t mqe_size;
  481. uint32_t mq_elem_cnt;
  482. uint32_t wq_pages_max;
  483. uint32_t wqe_size;
  484. uint32_t rq_pages_max;
  485. uint32_t rqe_size;
  486. uint32_t hdr_pages_max;
  487. uint32_t hdr_size;
  488. uint32_t hdr_pp_align;
  489. uint32_t sgl_pages_max;
  490. uint32_t sgl_pp_align;
  491. uint32_t mib_size;
  492. uint16_t mi_ver;
  493. #define LPFC_MIB1_SUPPORT 1
  494. #define LPFC_MIB2_SUPPORT 2
  495. #define LPFC_MIB3_SUPPORT 3
  496. uint16_t mi_value;
  497. #define LPFC_DFLT_MIB_VAL 2
  498. uint8_t mib_bde_cnt;
  499. uint8_t cmf;
  500. uint8_t cqv;
  501. uint8_t mqv;
  502. uint8_t wqv;
  503. uint8_t rqv;
  504. uint8_t eqav;
  505. uint8_t cqav;
  506. uint8_t wqsize;
  507. uint8_t bv1s;
  508. uint8_t pls;
  509. #define LPFC_WQ_SZ64_SUPPORT 1
  510. #define LPFC_WQ_SZ128_SUPPORT 2
  511. uint8_t wqpcnt;
  512. uint8_t nvme;
  513. };
  514. #define LPFC_CQ_4K_PAGE_SZ 0x1
  515. #define LPFC_CQ_16K_PAGE_SZ 0x4
  516. #define LPFC_WQ_4K_PAGE_SZ 0x1
  517. #define LPFC_WQ_16K_PAGE_SZ 0x4
  518. struct lpfc_iov {
  519. uint32_t pf_number;
  520. uint32_t vf_number;
  521. };
  522. struct lpfc_sli4_lnk_info {
  523. uint8_t lnk_dv;
  524. #define LPFC_LNK_DAT_INVAL 0
  525. #define LPFC_LNK_DAT_VAL 1
  526. uint8_t lnk_tp;
  527. #define LPFC_LNK_GE 0x0 /* FCoE */
  528. #define LPFC_LNK_FC 0x1 /* FC */
  529. #define LPFC_LNK_FC_TRUNKED 0x2 /* FC_Trunked */
  530. uint8_t lnk_no;
  531. uint8_t optic_state;
  532. };
  533. #define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
  534. LPFC_FOF_IO_CHAN_NUM)
  535. /* Used for tracking CPU mapping attributes */
  536. struct lpfc_vector_map_info {
  537. uint16_t phys_id;
  538. uint16_t core_id;
  539. uint16_t eq;
  540. uint16_t hdwq;
  541. uint16_t flag;
  542. #define LPFC_CPU_MAP_HYPER 0x1
  543. #define LPFC_CPU_MAP_UNASSIGN 0x2
  544. #define LPFC_CPU_FIRST_IRQ 0x4
  545. };
  546. #define LPFC_VECTOR_MAP_EMPTY 0xffff
  547. #define LPFC_IRQ_EMPTY 0xffffffff
  548. /* Multi-XRI pool */
  549. #define XRI_BATCH 8
  550. struct lpfc_pbl_pool {
  551. struct list_head list;
  552. u32 count;
  553. spinlock_t lock; /* lock for pbl_pool*/
  554. };
  555. struct lpfc_pvt_pool {
  556. u32 low_watermark;
  557. u32 high_watermark;
  558. struct list_head list;
  559. u32 count;
  560. spinlock_t lock; /* lock for pvt_pool */
  561. };
  562. struct lpfc_multixri_pool {
  563. u32 xri_limit;
  564. /* Starting point when searching a pbl_pool with round-robin method */
  565. u32 rrb_next_hwqid;
  566. /* Used by lpfc_adjust_pvt_pool_count.
  567. * io_req_count is incremented by 1 during IO submission. The heartbeat
  568. * handler uses these two variables to determine if pvt_pool is idle or
  569. * busy.
  570. */
  571. u32 prev_io_req_count;
  572. u32 io_req_count;
  573. /* statistics */
  574. u32 pbl_empty_count;
  575. #ifdef LPFC_MXP_STAT
  576. u32 above_limit_count;
  577. u32 below_limit_count;
  578. u32 local_pbl_hit_count;
  579. u32 other_pbl_hit_count;
  580. u32 stat_max_hwm;
  581. #define LPFC_MXP_SNAPSHOT_TAKEN 3 /* snapshot is taken at 3rd heartbeats */
  582. u32 stat_pbl_count;
  583. u32 stat_pvt_count;
  584. u32 stat_busy_count;
  585. u32 stat_snapshot_taken;
  586. #endif
  587. /* TODO: Separate pvt_pool into get and put list */
  588. struct lpfc_pbl_pool pbl_pool; /* Public free XRI pool */
  589. struct lpfc_pvt_pool pvt_pool; /* Private free XRI pool */
  590. };
  591. struct lpfc_fc4_ctrl_stat {
  592. u32 input_requests;
  593. u32 output_requests;
  594. u32 control_requests;
  595. u32 io_cmpls;
  596. };
  597. #ifdef LPFC_HDWQ_LOCK_STAT
  598. struct lpfc_lock_stat {
  599. uint32_t alloc_xri_get;
  600. uint32_t alloc_xri_put;
  601. uint32_t free_xri;
  602. uint32_t wq_access;
  603. uint32_t alloc_pvt_pool;
  604. uint32_t mv_from_pvt_pool;
  605. uint32_t mv_to_pub_pool;
  606. uint32_t mv_to_pvt_pool;
  607. uint32_t free_pub_pool;
  608. uint32_t free_pvt_pool;
  609. };
  610. #endif
  611. struct lpfc_eq_intr_info {
  612. struct list_head list;
  613. uint32_t icnt;
  614. };
  615. /* SLI4 HBA data structure entries */
  616. struct lpfc_sli4_hdw_queue {
  617. /* Pointers to the constructed SLI4 queues */
  618. struct lpfc_queue *hba_eq; /* Event queues for HBA */
  619. struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */
  620. struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */
  621. uint16_t io_cq_map;
  622. /* Keep track of IO buffers for this hardware queue */
  623. spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
  624. struct list_head lpfc_io_buf_list_get;
  625. spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
  626. struct list_head lpfc_io_buf_list_put;
  627. spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
  628. struct list_head lpfc_abts_io_buf_list;
  629. uint32_t total_io_bufs;
  630. uint32_t get_io_bufs;
  631. uint32_t put_io_bufs;
  632. uint32_t empty_io_bufs;
  633. uint32_t abts_scsi_io_bufs;
  634. uint32_t abts_nvme_io_bufs;
  635. /* Multi-XRI pool per HWQ */
  636. struct lpfc_multixri_pool *p_multixri_pool;
  637. /* FC-4 Stats counters */
  638. struct lpfc_fc4_ctrl_stat nvme_cstat;
  639. struct lpfc_fc4_ctrl_stat scsi_cstat;
  640. #ifdef LPFC_HDWQ_LOCK_STAT
  641. struct lpfc_lock_stat lock_conflict;
  642. #endif
  643. /* Per HDWQ pool resources */
  644. struct list_head sgl_list;
  645. struct list_head cmd_rsp_buf_list;
  646. /* Lock for syncing Per HDWQ pool resources */
  647. spinlock_t hdwq_lock;
  648. };
  649. #ifdef LPFC_HDWQ_LOCK_STAT
  650. /* compile time trylock stats */
  651. #define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
  652. { \
  653. int only_once = 1; \
  654. while (spin_trylock_irqsave(lock, flag) == 0) { \
  655. if (only_once) { \
  656. only_once = 0; \
  657. qp->lock_conflict.lstat++; \
  658. } \
  659. } \
  660. }
  661. #define lpfc_qp_spin_lock(lock, qp, lstat) \
  662. { \
  663. int only_once = 1; \
  664. while (spin_trylock(lock) == 0) { \
  665. if (only_once) { \
  666. only_once = 0; \
  667. qp->lock_conflict.lstat++; \
  668. } \
  669. } \
  670. }
  671. #else
  672. #define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \
  673. spin_lock_irqsave(lock, flag)
  674. #define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
  675. #endif
  676. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  677. struct lpfc_hdwq_stat {
  678. u32 hdwq_no;
  679. u32 rcv_io;
  680. u32 xmt_io;
  681. u32 cmpl_io;
  682. };
  683. #endif
  684. struct lpfc_sli4_hba {
  685. void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
  686. * config space registers
  687. */
  688. void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
  689. * control registers
  690. */
  691. void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
  692. * doorbell registers
  693. */
  694. void __iomem *dpp_regs_memmap_p; /* Kernel memory mapped address for
  695. * dpp registers
  696. */
  697. union {
  698. struct {
  699. /* IF Type 0, BAR 0 PCI cfg space reg mem map */
  700. void __iomem *UERRLOregaddr;
  701. void __iomem *UERRHIregaddr;
  702. void __iomem *UEMASKLOregaddr;
  703. void __iomem *UEMASKHIregaddr;
  704. } if_type0;
  705. struct {
  706. /* IF Type 2, BAR 0 PCI cfg space reg mem map. */
  707. void __iomem *STATUSregaddr;
  708. void __iomem *CTRLregaddr;
  709. void __iomem *ERR1regaddr;
  710. #define SLIPORT_ERR1_REG_ERR_CODE_1 0x1
  711. #define SLIPORT_ERR1_REG_ERR_CODE_2 0x2
  712. void __iomem *ERR2regaddr;
  713. #define SLIPORT_ERR2_REG_FW_RESTART 0x0
  714. #define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1
  715. #define SLIPORT_ERR2_REG_FORCED_DUMP 0x2
  716. #define SLIPORT_ERR2_REG_FAILURE_EQ 0x3
  717. #define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
  718. #define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
  719. #define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
  720. void __iomem *EQDregaddr;
  721. } if_type2;
  722. } u;
  723. /* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */
  724. void __iomem *PSMPHRregaddr;
  725. /* Well-known SLI INTF register memory map. */
  726. void __iomem *SLIINTFregaddr;
  727. /* IF type 0, BAR 1 function CSR register memory map */
  728. void __iomem *ISRregaddr; /* HST_ISR register */
  729. void __iomem *IMRregaddr; /* HST_IMR register */
  730. void __iomem *ISCRregaddr; /* HST_ISCR register */
  731. /* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
  732. void __iomem *RQDBregaddr; /* RQ_DOORBELL register */
  733. void __iomem *WQDBregaddr; /* WQ_DOORBELL register */
  734. void __iomem *CQDBregaddr; /* CQ_DOORBELL register */
  735. void __iomem *EQDBregaddr; /* EQ_DOORBELL register */
  736. void __iomem *MQDBregaddr; /* MQ_DOORBELL register */
  737. void __iomem *BMBXregaddr; /* BootStrap MBX register */
  738. uint32_t ue_mask_lo;
  739. uint32_t ue_mask_hi;
  740. uint32_t ue_to_sr;
  741. uint32_t ue_to_rp;
  742. struct lpfc_register sli_intf;
  743. struct lpfc_pc_sli4_params pc_sli4_params;
  744. struct lpfc_bbscn_params bbscn_params;
  745. struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
  746. void (*sli4_eq_clr_intr)(struct lpfc_queue *q);
  747. void (*sli4_write_eq_db)(struct lpfc_hba *phba, struct lpfc_queue *eq,
  748. uint32_t count, bool arm);
  749. void (*sli4_write_cq_db)(struct lpfc_hba *phba, struct lpfc_queue *cq,
  750. uint32_t count, bool arm);
  751. /* Pointers to the constructed SLI4 queues */
  752. struct lpfc_sli4_hdw_queue *hdwq;
  753. struct list_head lpfc_wq_list;
  754. /* Pointers to the constructed SLI4 queues for NVMET */
  755. struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
  756. struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
  757. struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
  758. struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
  759. struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
  760. struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */
  761. struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
  762. struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
  763. struct lpfc_queue *nvmels_wq; /* NVME LS work queue */
  764. struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
  765. struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
  766. struct lpfc_name wwnn;
  767. struct lpfc_name wwpn;
  768. uint32_t fw_func_mode; /* FW function protocol mode */
  769. uint32_t ulp0_mode; /* ULP0 protocol mode */
  770. uint32_t ulp1_mode; /* ULP1 protocol mode */
  771. /* Optimized Access Storage specific queues/structures */
  772. uint64_t oas_next_lun;
  773. uint8_t oas_next_tgt_wwpn[8];
  774. uint8_t oas_next_vpt_wwpn[8];
  775. /* Setup information for various queue parameters */
  776. int eq_esize;
  777. int eq_ecount;
  778. int cq_esize;
  779. int cq_ecount;
  780. int wq_esize;
  781. int wq_ecount;
  782. int mq_esize;
  783. int mq_ecount;
  784. int rq_esize;
  785. int rq_ecount;
  786. #define LPFC_SP_EQ_MAX_INTR_SEC 10000
  787. #define LPFC_FP_EQ_MAX_INTR_SEC 10000
  788. uint32_t intr_enable;
  789. struct lpfc_bmbx bmbx;
  790. struct lpfc_max_cfg_param max_cfg_param;
  791. uint16_t extents_in_use; /* must allocate resource extents. */
  792. uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
  793. uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
  794. uint16_t next_rpi;
  795. uint16_t io_xri_max;
  796. uint16_t io_xri_cnt;
  797. uint16_t io_xri_start;
  798. uint16_t els_xri_cnt;
  799. uint16_t nvmet_xri_cnt;
  800. uint16_t nvmet_io_wait_cnt;
  801. uint16_t nvmet_io_wait_total;
  802. uint16_t cq_max;
  803. struct lpfc_queue **cq_lookup;
  804. struct list_head lpfc_els_sgl_list;
  805. struct list_head lpfc_abts_els_sgl_list;
  806. spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */
  807. struct list_head lpfc_abts_io_buf_list;
  808. struct list_head lpfc_nvmet_sgl_list;
  809. spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
  810. struct list_head lpfc_abts_nvmet_ctx_list;
  811. spinlock_t t_active_list_lock; /* list of active NVMET IOs */
  812. struct list_head t_active_ctx_list;
  813. struct list_head lpfc_nvmet_io_wait_list;
  814. struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
  815. struct lpfc_sglq **lpfc_sglq_active_list;
  816. struct list_head lpfc_rpi_hdr_list;
  817. unsigned long *rpi_bmask;
  818. uint16_t *rpi_ids;
  819. uint16_t rpi_count;
  820. struct list_head lpfc_rpi_blk_list;
  821. unsigned long *xri_bmask;
  822. uint16_t *xri_ids;
  823. struct list_head lpfc_xri_blk_list;
  824. unsigned long *vfi_bmask;
  825. uint16_t *vfi_ids;
  826. uint16_t vfi_count;
  827. struct list_head lpfc_vfi_blk_list;
  828. struct lpfc_sli4_flags sli4_flags;
  829. struct list_head sp_queue_event;
  830. struct list_head sp_cqe_event_pool;
  831. struct list_head sp_asynce_work_queue;
  832. spinlock_t asynce_list_lock; /* protect sp_asynce_work_queue list */
  833. struct list_head sp_els_xri_aborted_work_queue;
  834. spinlock_t els_xri_abrt_list_lock; /* protect els_xri_aborted list */
  835. struct list_head sp_unsol_work_queue;
  836. struct lpfc_sli4_link link_state;
  837. struct lpfc_sli4_lnk_info lnk_info;
  838. uint32_t pport_name_sta;
  839. #define LPFC_SLI4_PPNAME_NON 0
  840. #define LPFC_SLI4_PPNAME_GET 1
  841. struct lpfc_iov iov;
  842. spinlock_t sgl_list_lock; /* list of aborted els IOs */
  843. spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
  844. uint32_t physical_port;
  845. /* CPU to vector mapping information */
  846. struct lpfc_vector_map_info *cpu_map;
  847. uint16_t num_possible_cpu;
  848. uint16_t num_present_cpu;
  849. struct cpumask irq_aff_mask;
  850. uint16_t curr_disp_cpu;
  851. struct lpfc_eq_intr_info __percpu *eq_info;
  852. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  853. struct lpfc_hdwq_stat __percpu *c_stat;
  854. #endif
  855. struct lpfc_idle_stat *idle_stat;
  856. uint32_t conf_trunk;
  857. #define lpfc_conf_trunk_port0_WORD conf_trunk
  858. #define lpfc_conf_trunk_port0_SHIFT 0
  859. #define lpfc_conf_trunk_port0_MASK 0x1
  860. #define lpfc_conf_trunk_port1_WORD conf_trunk
  861. #define lpfc_conf_trunk_port1_SHIFT 1
  862. #define lpfc_conf_trunk_port1_MASK 0x1
  863. #define lpfc_conf_trunk_port2_WORD conf_trunk
  864. #define lpfc_conf_trunk_port2_SHIFT 2
  865. #define lpfc_conf_trunk_port2_MASK 0x1
  866. #define lpfc_conf_trunk_port3_WORD conf_trunk
  867. #define lpfc_conf_trunk_port3_SHIFT 3
  868. #define lpfc_conf_trunk_port3_MASK 0x1
  869. #define lpfc_conf_trunk_port0_nd_WORD conf_trunk
  870. #define lpfc_conf_trunk_port0_nd_SHIFT 4
  871. #define lpfc_conf_trunk_port0_nd_MASK 0x1
  872. #define lpfc_conf_trunk_port1_nd_WORD conf_trunk
  873. #define lpfc_conf_trunk_port1_nd_SHIFT 5
  874. #define lpfc_conf_trunk_port1_nd_MASK 0x1
  875. #define lpfc_conf_trunk_port2_nd_WORD conf_trunk
  876. #define lpfc_conf_trunk_port2_nd_SHIFT 6
  877. #define lpfc_conf_trunk_port2_nd_MASK 0x1
  878. #define lpfc_conf_trunk_port3_nd_WORD conf_trunk
  879. #define lpfc_conf_trunk_port3_nd_SHIFT 7
  880. #define lpfc_conf_trunk_port3_nd_MASK 0x1
  881. uint8_t flash_id;
  882. uint8_t asic_rev;
  883. uint16_t fawwpn_flag; /* FA-WWPN support state */
  884. #define LPFC_FAWWPN_CONFIG 0x1 /* FA-PWWN is configured */
  885. #define LPFC_FAWWPN_FABRIC 0x2 /* FA-PWWN success with Fabric */
  886. };
/* Category of buffer an SGL entry is attached to (see lpfc_sglq.buff_type). */
enum lpfc_sge_type {
	GEN_BUFF_TYPE,		/* general-purpose buffer */
	SCSI_BUFF_TYPE,		/* SCSI I/O buffer */
	NVMET_BUFF_TYPE		/* NVME target buffer */
};
/* Lifecycle state of an SGL entry (see lpfc_sglq.state). */
enum lpfc_sgl_state {
	SGL_FREED,		/* not assigned to any I/O */
	SGL_ALLOCATED,		/* assigned to an active I/O */
	SGL_XRI_ABORTED		/* NOTE(review): presumably set while the XRI
				 * abort completes — confirm against users */
};
/*
 * Bookkeeping for one pre-assigned scatter-gather list and its XRI tags.
 * Entries move between driver lists (e.g. active/aborted) as I/Os proceed.
 */
struct lpfc_sglq {
	/* lpfc_sglqs are used in double linked lists */
	struct list_head list;
	struct list_head clist;
	enum lpfc_sge_type buff_type;	/* is this a scsi sgl */
	enum lpfc_sgl_state state;	/* freed / allocated / xri-aborted */
	struct lpfc_nodelist *ndlp;	/* ndlp associated with IO */
	uint16_t iotag;			/* pre-assigned IO tag */
	uint16_t sli4_lxritag;		/* logical pre-assigned xri. */
	uint16_t sli4_xritag;		/* pre-assigned XRI, (OXID) tag. */
	struct sli4_sge *sgl;		/* pre-assigned SGL */
	void *virt;			/* virtual address. */
	dma_addr_t phys;		/* physical address */
};
/*
 * Descriptor for one RPI header region posted to the port
 * (see lpfc_sli4_create_rpi_hdr()/lpfc_sli4_post_rpi_hdr()).
 */
struct lpfc_rpi_hdr {
	struct list_head list;		/* linkage on lpfc_rpi_hdr_list */
	uint32_t len;			/* length of the DMA region */
	struct lpfc_dmabuf *dmabuf;	/* backing DMA buffer */
	uint32_t page_count;		/* pages in this header region */
	uint32_t start_rpi;		/* first RPI covered by this region */
	uint16_t next_rpi;		/* NOTE(review): presumably first RPI
					 * past this region — confirm */
};
/*
 * One contiguous block of port resource IDs; presumably used for the
 * rpi/xri/vfi block lists kept in struct lpfc_sli4_hba — confirm at callers.
 */
struct lpfc_rsrc_blks {
	struct list_head list;		/* linkage on a resource block list */
	uint16_t rsrc_start;		/* first ID in the block */
	uint16_t rsrc_size;		/* number of IDs in the block */
	uint16_t rsrc_used;		/* IDs currently in use */
};
/*
 * State carried across an asynchronous RDP (Read Diagnostic Parameters)
 * ELS exchange: the exchange IDs, collected link statistics and the SFF
 * transceiver pages, plus the completion callback.
 */
struct lpfc_rdp_context {
	struct lpfc_nodelist *ndlp;	/* remote node of the exchange */
	uint16_t ox_id;			/* originator exchange ID */
	uint16_t rx_id;			/* responder exchange ID */
	READ_LNK_VAR link_stat;		/* link statistics snapshot */
	uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE];	/* SFF-8472 page A0 dump */
	uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE];	/* SFF-8472 page A2 dump */
	/* completion handler; the int argument carries the status code */
	void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context*, int);
};
/*
 * Parameters of an LCB (Link Cable Beacon) ELS exchange; field meanings
 * mirror the LCB payload fields — confirm against the ELS handler.
 */
struct lpfc_lcb_context {
	uint8_t sub_command;	/* LCB sub-command requested */
	uint8_t type;		/* beacon type */
	uint8_t capability;	/* capability bits */
	uint8_t frequency;	/* blink frequency */
	uint16_t duration;	/* beacon duration */
	uint16_t ox_id;		/* originator exchange ID */
	uint16_t rx_id;		/* responder exchange ID */
	struct lpfc_nodelist *ndlp;	/* remote node of the exchange */
};
  944. /*
  945. * SLI4 specific function prototypes
  946. */
  947. int lpfc_pci_function_reset(struct lpfc_hba *);
  948. int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
  949. int lpfc_sli4_hba_setup(struct lpfc_hba *);
  950. int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
  951. uint8_t, uint32_t, bool);
  952. void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
  953. void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
  954. void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
  955. struct lpfc_mbx_sge *);
  956. int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
  957. uint16_t);
  958. void lpfc_sli4_hba_reset(struct lpfc_hba *);
  959. struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *phba,
  960. uint32_t page_size,
  961. uint32_t entry_size,
  962. uint32_t entry_count, int cpu);
  963. void lpfc_sli4_queue_free(struct lpfc_queue *);
  964. int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
  965. void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
  966. uint32_t numq, uint32_t usdelay);
  967. int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
  968. struct lpfc_queue *, uint32_t, uint32_t);
  969. int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
  970. struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
  971. uint32_t subtype);
  972. int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
  973. struct lpfc_queue *, uint32_t);
  974. int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
  975. struct lpfc_queue *, uint32_t);
  976. int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
  977. struct lpfc_queue *, struct lpfc_queue *, uint32_t);
  978. int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
  979. struct lpfc_queue **drqp, struct lpfc_queue **cqp,
  980. uint32_t subtype);
  981. int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
  982. int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
  983. int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
  984. int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
  985. int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
  986. struct lpfc_queue *);
  987. int lpfc_sli4_queue_setup(struct lpfc_hba *);
  988. void lpfc_sli4_queue_unset(struct lpfc_hba *);
  989. int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
  990. int lpfc_repost_io_sgl_list(struct lpfc_hba *phba);
  991. uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
  992. void lpfc_sli4_free_xri(struct lpfc_hba *, int);
  993. int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
  994. struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
  995. struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
  996. void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
  997. void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
  998. int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
  999. int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
  1000. int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
  1001. struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
  1002. void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
  1003. int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
  1004. void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
  1005. void lpfc_sli4_remove_rpis(struct lpfc_hba *);
  1006. void lpfc_sli4_async_event_proc(struct lpfc_hba *);
  1007. void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
  1008. int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
  1009. void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
  1010. void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
  1011. void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
  1012. struct lpfc_io_buf *lpfc_ncmd);
  1013. void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
  1014. struct sli4_wcqe_xri_aborted *axri,
  1015. struct lpfc_io_buf *lpfc_ncmd);
  1016. void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
  1017. struct sli4_wcqe_xri_aborted *axri, int idx);
  1018. void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
  1019. struct sli4_wcqe_xri_aborted *axri);
  1020. void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
  1021. struct sli4_wcqe_xri_aborted *);
  1022. void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
  1023. void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
  1024. int lpfc_sli4_brdreset(struct lpfc_hba *);
  1025. int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
  1026. void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
  1027. int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
  1028. int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
  1029. int lpfc_sli4_init_vpi(struct lpfc_vport *);
  1030. void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
  1031. void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
  1032. uint32_t count, bool arm);
  1033. void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
  1034. uint32_t count, bool arm);
  1035. void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
  1036. void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
  1037. uint32_t count, bool arm);
  1038. void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
  1039. uint32_t count, bool arm);
  1040. void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
  1041. int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
  1042. int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
  1043. int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
  1044. void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
  1045. void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
  1046. void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
  1047. int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
  1048. int lpfc_sli4_post_status_check(struct lpfc_hba *);
  1049. uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
  1050. uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
  1051. void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
  1052. struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
  1053. struct lpfc_io_buf *buf);
  1054. struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
  1055. struct lpfc_io_buf *buf);
  1056. int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
  1057. int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
  1058. struct lpfc_io_buf *buf);
  1059. void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
  1060. struct lpfc_sli4_hdw_queue *hdwq);
  1061. void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
  1062. struct lpfc_sli4_hdw_queue *hdwq);
  1063. static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
  1064. {
  1065. return q->q_pgs[idx / q->entry_cnt_per_pg] +
  1066. (q->entry_size * (idx % q->entry_cnt_per_pg));
  1067. }