/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2009-2013 LSI Corporation
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright (c) 2016-2018 Broadcom Inc.
 *
 * FILE: megaraid_sas_fusion.h
 *
 * Authors: Broadcom Inc.
 *          Manoj Jose
 *          Sumant Patro
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */
#ifndef _MEGARAID_SAS_FUSION_H_
#define _MEGARAID_SAS_FUSION_H_
/* Fusion defines */
#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024
#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
#define MEGASAS_MAX_CHAIN_SHIFT 5
#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000
#define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0
#define MEGASAS_256K_IO 128
#define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4)
#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
#define MEGASAS_LOAD_BALANCE_FLAG 0x1
#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
#define HOST_DIAG_WRITE_ENABLE 0x80
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3
#define MAX_MSIX_QUEUES_FUSION 128
#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK)
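/*
 * For example, with MAX_MSIX_QUEUES_FUSION = 128 reply queues and
 * RDPQ_MAX_INDEX_IN_ONE_CHUNK = 16 descriptors per DMA chunk,
 * RDPQ_MAX_CHUNK_COUNT evaluates to 128 / 16 = 8 chunks.
 */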
/* Invader defines */
#define MPI2_TYPE_CUDA 0x2
#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
#define MR_RL_WRITE_THROUGH_MODE 0x00
#define MR_RL_WRITE_BACK_MODE 0x01
/* T10 PI defines */
#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
/*
 * Raid context flags
 */
#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
enum MR_RAID_FLAGS_IO_SUB_TYPE {
	MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
	MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
	MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
	MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7,
	MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD = 8
};
/*
 * Request descriptor types
 */
#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
#define IOC_INIT_FRAME_SIZE 4096
/*
 * Raid Context structure which describes MegaRAID specific IO Parameters
 * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
 */
struct RAID_CONTEXT {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 nseg:4;
	u8 type:4;
#else
	u8 type:4;
	u8 nseg:4;
#endif
	u8 resvd0;
	__le16 timeout_value;
	u8 reg_lock_flags;
	u8 resvd1;
	__le16 virtual_disk_tgt_id;
	__le64 reg_lock_row_lba;
	__le32 reg_lock_length;
	__le16 next_lmid;
	u8 ex_status;
	u8 status;
	u8 raid_flags;
	u8 num_sge;
	__le16 config_seq_num;
	u8 span_arm;
	u8 priority;
	u8 num_sge_ext;
	u8 resvd2;
};
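/*
 * Illustrative sketch only (the helper name is not part of this interface):
 * how the MR_RL_* values above typically combine into the legacy
 * RAID_CONTEXT reg_lock_flags byte for a CUDA-granted, sequence-number
 * enabled fast-path request.
 */
static inline u8 mr_raid_ctx_reg_lock_flags_cuda(void)
{
	return MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE;
}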
/*
 * Raid Context structure which describes Ventura MegaRAID specific
 * IO Parameters. This resides at offset 0x60 where the SGL normally
 * starts in MPT IO Frames
 */
struct RAID_CONTEXT_G35 {
#define RAID_CONTEXT_NSEG_MASK 0x00F0
#define RAID_CONTEXT_NSEG_SHIFT 4
#define RAID_CONTEXT_TYPE_MASK 0x000F
#define RAID_CONTEXT_TYPE_SHIFT 0
	u16 nseg_type;
	u16 timeout_value; /* 0x02 -0x03 */
	u16 routing_flags; /* 0x04 -0x05 routing flags */
	u16 virtual_disk_tgt_id; /* 0x06 -0x07 */
	__le64 reg_lock_row_lba; /* 0x08 - 0x0F */
	u32 reg_lock_length; /* 0x10 - 0x13 */
	union { /* flow specific */
		u16 rmw_op_index; /* 0x14 - 0x15, R5/6 RMW: rmw operation index */
		u16 peer_smid; /* 0x14 - 0x15, R1 Write: peer smid */
		u16 r56_arm_map; /* 0x14 - 0x15, Unused [15], LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
	} flow_specific;
	u8 ex_status; /* 0x16 : OUT */
	u8 status; /* 0x17 status */
	u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4],
			* resvd[3:1], preferredCpu[0]
			*/
	u8 span_arm; /* 0x1C span[7:5], arm[4:0] */
	u16 config_seq_num; /* 0x1A -0x1B */
	union {
		/*
		 * Bit format:
		 * ---------------------------------
		 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
		 * ---------------------------------
		 * Byte0 |    numSGE[7]- numSGE[0]  |
		 * ---------------------------------
		 * Byte1 |SD | resvd | numSGE 8-11  |
		 * ---------------------------------
		 */
#define NUM_SGE_MASK_LOWER 0xFF
#define NUM_SGE_MASK_UPPER 0x0F
#define NUM_SGE_SHIFT_UPPER 8
#define STREAM_DETECT_SHIFT 7
#define STREAM_DETECT_MASK 0x80
		struct {
#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
			u16 stream_detected:1;
			u16 reserved:3;
			u16 num_sge:12;
#else
			u16 num_sge:12;
			u16 reserved:3;
			u16 stream_detected:1;
#endif
		} bits;
		u8 bytes[2];
	} u;
	u8 resvd2[2]; /* 0x1E-0x1F */
};
#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1
#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2
#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3
#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4
#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5
#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6
#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7
#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8
#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00
#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12
#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000
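/*
 * Illustrative sketch only (the helper name is not part of this header):
 * packing the G35 nseg_type word from the RAID_CONTEXT_NSEG/TYPE masks
 * and shifts defined inside struct RAID_CONTEXT_G35 above.
 */
static inline u16 mr_raid_ctx_g35_nseg_type(u8 nseg, u8 type)
{
	return ((nseg << RAID_CONTEXT_NSEG_SHIFT) & RAID_CONTEXT_NSEG_MASK) |
	       ((type << RAID_CONTEXT_TYPE_SHIFT) & RAID_CONTEXT_TYPE_MASK);
}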
static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
			       u16 sge_count)
{
	rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
	rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
				     & NUM_SGE_MASK_UPPER);
}
static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
{
	u16 sge_count;
	sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
			   << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
	return sge_count;
}
#define SET_STREAM_DETECTED(rctx_g35) \
	(rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)
#define CLEAR_STREAM_DETECTED(rctx_g35) \
	(rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))
static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
{
	return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK));
}
union RAID_CONTEXT_UNION {
	struct RAID_CONTEXT raid_context;
	struct RAID_CONTEXT_G35 raid_context_g35;
};
#define RAID_CTX_SPANARM_ARM_SHIFT (0)
#define RAID_CTX_SPANARM_ARM_MASK (0x1f)
#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
/* LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
#define RAID_CTX_R56_Q_ARM_MASK (0x1F)
#define RAID_CTX_R56_P_ARM_SHIFT (5)
#define RAID_CTX_R56_P_ARM_MASK (0x3E0)
#define RAID_CTX_R56_LOG_ARM_SHIFT (10)
#define RAID_CTX_R56_LOG_ARM_MASK (0x7C00)
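/*
 * Illustrative sketch only (helper names are not part of this header):
 * decoding a span_arm byte and an r56_arm_map word with the masks and
 * shifts defined directly above.
 */
static inline u8 mr_spanarm_span(u8 span_arm)
{
	return (span_arm & RAID_CTX_SPANARM_SPAN_MASK) >>
		RAID_CTX_SPANARM_SPAN_SHIFT;
}
static inline u8 mr_r56_arm_map_p_arm(u16 r56_arm_map)
{
	return (r56_arm_map & RAID_CTX_R56_P_ARM_MASK) >>
		RAID_CTX_R56_P_ARM_SHIFT;
}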
/* number of bits per index in U32 TrackStream */
#define BITS_PER_INDEX_STREAM 4
#define INVALID_STREAM_NUM 16
#define MR_STREAM_BITMAP 0x76543210
#define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1)
#define ZERO_LAST_STREAM 0x0fffffff
#define MAX_STREAMS_TRACKED 8
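/*
 * Illustrative sketch only (the helper name is not part of this header):
 * reading one 4-bit stream index out of an MR_STREAM_BITMAP-style
 * tracking word, using the constants defined above.
 */
static inline u8 mr_stream_index(u32 mru_bit_map, u8 slot)
{
	return (mru_bit_map >> (slot * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
}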
/*
 * define region lock types
 */
enum REGION_TYPE {
	REGION_TYPE_UNUSED = 0,
	REGION_TYPE_SHARED_READ = 1,
	REGION_TYPE_SHARED_WRITE = 2,
	REGION_TYPE_EXCLUSIVE = 3,
};
/* MPI2 defines */
#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
#define MPI2_WHOINIT_HOST_DRIVER (0x04)
#define MPI2_VERSION_MAJOR (0x02)
#define MPI2_VERSION_MINOR (0x00)
#define MPI2_VERSION_MAJOR_MASK (0xFF00)
#define MPI2_VERSION_MAJOR_SHIFT (8)
#define MPI2_VERSION_MINOR_MASK (0x00FF)
#define MPI2_VERSION_MINOR_SHIFT (0)
#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
		      MPI2_VERSION_MINOR)
#define MPI2_HEADER_VERSION_UNIT (0x10)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
			     MPI2_HEADER_VERSION_DEV)
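/*
 * For example, the composites above evaluate to
 * MPI2_VERSION = (0x02 << 8) | 0x00 = 0x0200 and
 * MPI2_HEADER_VERSION = (0x10 << 8) | 0x00 = 0x1000.
 */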
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
/* EEDP escape mode */
#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
struct MPI25_IEEE_SGE_CHAIN64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 NextChainOffset;
	u8 Flags;
};
struct MPI2_SGE_SIMPLE_UNION {
	__le32 FlagsLength;
	union {
		__le32 Address32;
		__le64 Address64;
	} u;
};
struct MPI2_SCSI_IO_CDB_EEDP32 {
	u8 CDB[20]; /* 0x00 */
	__be32 PrimaryReferenceTag; /* 0x14 */
	__be16 PrimaryApplicationTag; /* 0x18 */
	__be16 PrimaryApplicationTagMask; /* 0x1A */
	__le32 TransferLength; /* 0x1C */
};
struct MPI2_SGE_CHAIN_UNION {
	__le16 Length;
	u8 NextChainOffset;
	u8 Flags;
	union {
		__le32 Address32;
		__le64 Address64;
	} u;
};
struct MPI2_IEEE_SGE_SIMPLE32 {
	__le32 Address;
	__le32 FlagsLength;
};
struct MPI2_IEEE_SGE_CHAIN32 {
	__le32 Address;
	__le32 FlagsLength;
};
struct MPI2_IEEE_SGE_SIMPLE64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 Reserved2;
	u8 Flags;
};
struct MPI2_IEEE_SGE_CHAIN64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 Reserved2;
	u8 Flags;
};
union MPI2_IEEE_SGE_SIMPLE_UNION {
	struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
	struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
};
union MPI2_IEEE_SGE_CHAIN_UNION {
	struct MPI2_IEEE_SGE_CHAIN32 Chain32;
	struct MPI2_IEEE_SGE_CHAIN64 Chain64;
};
union MPI2_SGE_IO_UNION {
	struct MPI2_SGE_SIMPLE_UNION MpiSimple;
	struct MPI2_SGE_CHAIN_UNION MpiChain;
	union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
	union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
};
union MPI2_SCSI_IO_CDB_UNION {
	u8 CDB32[32];
	struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
	struct MPI2_SGE_SIMPLE_UNION SGE;
};
/****************************************************************************
 * SCSI Task Management messages
 ****************************************************************************/
/* SCSI Task Management Request Message */
struct MPI2_SCSI_TASK_MANAGE_REQUEST {
	u16 DevHandle; /* 0x00 */
	u8 ChainOffset; /* 0x02 */
	u8 Function; /* 0x03 */
	u8 Reserved1; /* 0x04 */
	u8 TaskType; /* 0x05 */
	u8 Reserved2; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	u16 Reserved3; /* 0x0A */
	u8 LUN[8]; /* 0x0C */
	u32 Reserved4[7]; /* 0x14 */
	u16 TaskMID; /* 0x30 */
	u16 Reserved5; /* 0x32 */
};
/* SCSI Task Management Reply Message */
struct MPI2_SCSI_TASK_MANAGE_REPLY {
	u16 DevHandle; /* 0x00 */
	u8 MsgLength; /* 0x02 */
	u8 Function; /* 0x03 */
	u8 ResponseCode; /* 0x04 */
	u8 TaskType; /* 0x05 */
	u8 Reserved1; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	u16 Reserved2; /* 0x0A */
	u16 Reserved3; /* 0x0C */
	u16 IOCStatus; /* 0x0E */
	u32 IOCLogInfo; /* 0x10 */
	u32 TerminationCount; /* 0x14 */
	u32 ResponseInfo; /* 0x18 */
};
struct MR_TM_REQUEST {
	char request[128];
};
struct MR_TM_REPLY {
	char reply[128];
};
/* SCSI Task Management Request Message */
struct MR_TASK_MANAGE_REQUEST {
	/* To be type casted to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
	struct MR_TM_REQUEST TmRequest;
	union {
		struct {
#if defined(__BIG_ENDIAN_BITFIELD)
			u32 reserved1:30;
			u32 isTMForPD:1;
			u32 isTMForLD:1;
#else
			u32 isTMForLD:1;
			u32 isTMForPD:1;
			u32 reserved1:30;
#endif
			u32 reserved2;
		} tmReqFlags;
		struct MR_TM_REPLY TMReply;
	};
};
/* TaskType values */
#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
/* ResponseCode values */
#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
/*
 * RAID SCSI IO Request Message
 * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
 */
struct MPI2_RAID_SCSI_IO_REQUEST {
	__le16 DevHandle; /* 0x00 */
	u8 ChainOffset; /* 0x02 */
	u8 Function; /* 0x03 */
	__le16 Reserved1; /* 0x04 */
	u8 Reserved2; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	__le16 Reserved3; /* 0x0A */
	__le32 SenseBufferLowAddress; /* 0x0C */
	__le16 SGLFlags; /* 0x10 */
	u8 SenseBufferLength; /* 0x12 */
	u8 Reserved4; /* 0x13 */
	u8 SGLOffset0; /* 0x14 */
	u8 SGLOffset1; /* 0x15 */
	u8 SGLOffset2; /* 0x16 */
	u8 SGLOffset3; /* 0x17 */
	__le32 SkipCount; /* 0x18 */
	__le32 DataLength; /* 0x1C */
	__le32 BidirectionalDataLength; /* 0x20 */
	__le16 IoFlags; /* 0x24 */
	__le16 EEDPFlags; /* 0x26 */
	__le32 EEDPBlockSize; /* 0x28 */
	__le32 SecondaryReferenceTag; /* 0x2C */
	__le16 SecondaryApplicationTag; /* 0x30 */
	__le16 ApplicationTagTranslationMask; /* 0x32 */
	u8 LUN[8]; /* 0x34 */
	__le32 Control; /* 0x3C */
	union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
	union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
	union {
		union MPI2_SGE_IO_UNION SGL; /* 0x80 */
		DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs);
	};
};
/*
 * MPT RAID MFA IO Descriptor.
 */
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
	u32 RequestFlags:8;
	u32 MessageAddress1:24;
	u32 MessageAddress2;
};
/* Default Request Descriptor */
struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 DescriptorTypeDependent; /* 0x06 */
};
/* High Priority Request Descriptor */
struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 Reserved1; /* 0x06 */
};
/* SCSI IO Request Descriptor */
struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 DevHandle; /* 0x06 */
};
/* SCSI Target Request Descriptor */
struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 IoIndex; /* 0x06 */
};
/* RAID Accelerator Request Descriptor */
struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 Reserved; /* 0x06 */
};
/* union of Request Descriptors */
union MEGASAS_REQUEST_DESCRIPTOR_UNION {
	struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
	struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
	struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
	struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
	struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
	struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
	union {
		struct {
			__le32 low;
			__le32 high;
		} u;
		__le64 Words;
	};
};
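/*
 * Illustrative sketch only (the helper name and cpu_to_le16() usage are
 * assumptions, not part of this header): filling a SCSI IO request
 * descriptor with the flag, SMID and device-handle fields defined above
 * before it is posted to the controller's inbound queue.
 */
static inline void mr_build_scsi_io_desc(union MEGASAS_REQUEST_DESCRIPTOR_UNION *desc,
					 u16 smid, u8 msix_index, u16 dev_handle)
{
	desc->SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
				    MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT;
	desc->SCSIIO.MSIxIndex = msix_index;
	desc->SCSIIO.SMID = cpu_to_le16(smid);
	desc->SCSIIO.DevHandle = cpu_to_le16(dev_handle);
}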
/* Default Reply Descriptor */
struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 DescriptorTypeDependent1; /* 0x02 */
	__le32 DescriptorTypeDependent2; /* 0x04 */
};
/* Address Reply Descriptor */
struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le32 ReplyFrameAddress; /* 0x04 */
};
/* SCSI IO Success Reply Descriptor */
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 TaskTag; /* 0x04 */
	__le16 Reserved1; /* 0x06 */
};
/* TargetAssist Success Reply Descriptor */
struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	u8 SequenceNumber; /* 0x04 */
	u8 Reserved1; /* 0x05 */
	__le16 IoIndex; /* 0x06 */
};
/* Target Command Buffer Reply Descriptor */
struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	u8 VP_ID; /* 0x02 */
	u8 Flags; /* 0x03 */
	__le16 InitiatorDevHandle; /* 0x04 */
	__le16 IoIndex; /* 0x06 */
};
/* RAID Accelerator Success Reply Descriptor */
struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le32 Reserved; /* 0x04 */
};
/* union of Reply Descriptors */
union MPI2_REPLY_DESCRIPTORS_UNION {
	struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
	struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
	struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
	struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
	struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
		RAIDAcceleratorSuccess;
	__le64 Words;
};
/* IOCInit Request message */
struct MPI2_IOC_INIT_REQUEST {
	u8 WhoInit; /* 0x00 */
	u8 Reserved1; /* 0x01 */
	u8 ChainOffset; /* 0x02 */
	u8 Function; /* 0x03 */
	__le16 Reserved2; /* 0x04 */
	u8 Reserved3; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	__le16 Reserved4; /* 0x0A */
	__le16 MsgVersion; /* 0x0C */
	__le16 HeaderVersion; /* 0x0E */
	u32 Reserved5; /* 0x10 */
	__le16 Reserved6; /* 0x14 */
	u8 HostPageSize; /* 0x16 */
	u8 HostMSIxVectors; /* 0x17 */
	__le16 Reserved8; /* 0x18 */
	__le16 SystemRequestFrameSize; /* 0x1A */
	__le16 ReplyDescriptorPostQueueDepth; /* 0x1C */
	__le16 ReplyFreeQueueDepth; /* 0x1E */
	__le32 SenseBufferAddressHigh; /* 0x20 */
	__le32 SystemReplyAddressHigh; /* 0x24 */
	__le64 SystemRequestFrameBaseAddress; /* 0x28 */
	__le64 ReplyDescriptorPostQueueAddress; /* 0x30 */
	__le64 ReplyFreeQueueAddress; /* 0x38 */
	__le64 TimeStamp; /* 0x40 */
};
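/*
 * Illustrative sketch only (the helper name and cpu_to_le16() usage are
 * assumptions): the identification and version fields an IOC INIT frame
 * carries, using the MPI2_FUNCTION_IOC_INIT, MPI2_WHOINIT_HOST_DRIVER,
 * MPI2_VERSION and MPI2_HEADER_VERSION values defined earlier.
 */
static inline void mr_ioc_init_set_versions(struct MPI2_IOC_INIT_REQUEST *init)
{
	init->Function = MPI2_FUNCTION_IOC_INIT;
	init->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	init->MsgVersion = cpu_to_le16(MPI2_VERSION);
	init->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
}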
/* mrpriv defines */
#define MR_PD_INVALID 0xFFFF
#define MR_DEVHANDLE_INVALID 0xFFFF
#define MAX_SPAN_DEPTH 8
#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
#define MAX_ROW_SIZE 32
#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
#define MAX_LOGICAL_DRIVES 64
#define MAX_LOGICAL_DRIVES_EXT 256
#define MAX_LOGICAL_DRIVES_DYN 512
#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
#define MAX_ARRAYS 128
#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
#define MAX_ARRAYS_EXT 256
#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
#define MAX_API_ARRAYS_DYN 512
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103
#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc */
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100
#define MR_DCMD_CTRL_DEVICE_LIST_GET 0x01190600
struct MR_DEV_HANDLE_INFO {
	__le16 curDevHdl;
	u8 validHandles;
	u8 interfaceType;
	__le16 devHandle[2];
};
struct MR_ARRAY_INFO {
	__le16 pd[MAX_RAIDMAP_ROW_SIZE];
};
struct MR_QUAD_ELEMENT {
	__le64 logStart;
	__le64 logEnd;
	__le64 offsetInSpan;
	__le32 diff;
	__le32 reserved1;
};
struct MR_SPAN_INFO {
	__le32 noElements;
	__le32 reserved1;
	struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
};
struct MR_LD_SPAN {
	__le64 startBlk;
	__le64 numBlks;
	__le16 arrayRef;
	u8 spanRowSize;
	u8 spanRowDataSize;
	u8 reserved[4];
};
struct MR_SPAN_BLOCK_INFO {
	__le64 num_rows;
	struct MR_LD_SPAN span;
	struct MR_SPAN_INFO block_span_info;
};
#define MR_RAID_CTX_CPUSEL_0 0
#define MR_RAID_CTX_CPUSEL_1 1
#define MR_RAID_CTX_CPUSEL_2 2
#define MR_RAID_CTX_CPUSEL_3 3
#define MR_RAID_CTX_CPUSEL_FCFS 0xF
struct MR_CPU_AFFINITY_MASK {
	union {
		struct {
#ifndef __BIG_ENDIAN_BITFIELD
			u8 hw_path:1;
			u8 cpu0:1;
			u8 cpu1:1;
			u8 cpu2:1;
			u8 cpu3:1;
			u8 reserved:3;
#else
			u8 reserved:3;
			u8 cpu3:1;
			u8 cpu2:1;
			u8 cpu1:1;
			u8 cpu0:1;
			u8 hw_path:1;
#endif
		};
		u8 core_mask;
	};
};
struct MR_IO_AFFINITY {
	union {
		struct {
			struct MR_CPU_AFFINITY_MASK pdRead;
			struct MR_CPU_AFFINITY_MASK pdWrite;
			struct MR_CPU_AFFINITY_MASK ldRead;
			struct MR_CPU_AFFINITY_MASK ldWrite;
		};
		u32 word;
	};
	u8 maxCores; /* Total cores + HW Path in ROC */
	u8 reserved[3];
};
struct MR_LD_RAID {
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved4:2;
		u32 fp_cache_bypass_capable:1;
		u32 fp_rmw_capable:1;
		u32 disable_coalescing:1;
		u32 fpBypassRegionLock:1;
		u32 tmCapable:1;
		u32 fpNonRWCapable:1;
		u32 fpReadAcrossStripe:1;
		u32 fpWriteAcrossStripe:1;
		u32 fpReadCapable:1;
		u32 fpWriteCapable:1;
		u32 encryptionType:8;
		u32 pdPiMode:4;
		u32 ldPiMode:4;
		u32 reserved5:2;
		u32 ra_capable:1;
		u32 fpCapable:1;
#else
		u32 fpCapable:1;
		u32 ra_capable:1;
		u32 reserved5:2;
		u32 ldPiMode:4;
		u32 pdPiMode:4;
		u32 encryptionType:8;
		u32 fpWriteCapable:1;
		u32 fpReadCapable:1;
		u32 fpWriteAcrossStripe:1;
		u32 fpReadAcrossStripe:1;
		u32 fpNonRWCapable:1;
		u32 tmCapable:1;
		u32 fpBypassRegionLock:1;
		u32 disable_coalescing:1;
		u32 fp_rmw_capable:1;
		u32 fp_cache_bypass_capable:1;
		u32 reserved4:2;
#endif
	} capability;
	__le32 reserved6;
	__le64 size;
	u8 spanDepth;
	u8 level;
	u8 stripeShift;
	u8 rowSize;
	u8 rowDataSize;
	u8 writeMode;
	u8 PRL;
	u8 SRL;
	__le16 targetId;
	u8 ldState;
	u8 regTypeReqOnWrite;
	u8 modFactor;
	u8 regTypeReqOnRead;
	__le16 seqNum;
	struct {
#ifndef __BIG_ENDIAN_BITFIELD
		u32 ldSyncRequired:1;
		u32 regTypeReqOnReadIsValid:1;
		u32 isEPD:1;
		u32 enableSLDOnAllRWIOs:1;
		u32 reserved:28;
#else
		u32 reserved:28;
		u32 enableSLDOnAllRWIOs:1;
		u32 isEPD:1;
		u32 regTypeReqOnReadIsValid:1;
		u32 ldSyncRequired:1;
#endif
	} flags;
	u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
	u8 fpIoTimeoutForLd; /* 0x2C timeout value used by driver in FP IO */
	/* 0x2D This LD accepts priority boost of this type */
	u8 ld_accept_priority_type;
	u8 reserved2[2]; /* 0x2E - 0x2F */
	/* 0x30 - 0x33, Logical block size for the LD */
	u32 logical_block_length;
	struct {
#ifndef __BIG_ENDIAN_BITFIELD
		/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
		u32 ld_pi_exp:4;
		/* 0x34, LOGICAL BLOCKS PER PHYSICAL
		 * BLOCK EXPONENT from READ CAPACITY 16
		 */
		u32 ld_logical_block_exp:4;
		u32 reserved1:24; /* 0x34 */
#else
		u32 reserved1:24; /* 0x34 */
		/* 0x34, LOGICAL BLOCKS PER PHYSICAL
		 * BLOCK EXPONENT from READ CAPACITY 16
		 */
		u32 ld_logical_block_exp:4;
		/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
		u32 ld_pi_exp:4;
#endif
	}; /* 0x34 - 0x37 */
	/* 0x38 - 0x3f, This will determine which
	 * core will process LD IO and PD IO.
	 */
	struct MR_IO_AFFINITY cpuAffinity;
	/* Bit definitions are specified by MR_IO_AFFINITY */
	u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */
};
struct MR_LD_SPAN_MAP {
	struct MR_LD_RAID ldRaid;
	u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
	struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
};
struct MR_FW_RAID_MAP {
	__le32 totalSize;
	union {
		struct {
			__le32 maxLd;
			__le32 maxSpanDepth;
			__le32 maxRowSize;
			__le32 maxPdCount;
			__le32 maxArrays;
		} validationInfo;
		__le32 version[5];
	};
	__le32 ldCount;
	__le32 Reserved1;
	u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES +
		       MAX_RAIDMAP_VIEWS];
	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];
	struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
	struct MR_LD_SPAN_MAP ldSpanMap[];
};
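/*
 * Illustrative sketch only (the helper name is not part of this header):
 * resolving a target ID to its MR_LD_RAID entry in the legacy firmware
 * map layout above, via the ldTgtIdToLd index table and ldSpanMap[].
 */
static inline struct MR_LD_RAID *mr_fw_map_ld_raid(struct MR_FW_RAID_MAP *map,
						   u8 ld_tgt_id)
{
	u8 ld = map->ldTgtIdToLd[ld_tgt_id];

	return &map->ldSpanMap[ld].ldRaid;
}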
struct IO_REQUEST_INFO {
	u64 ldStartBlock;
	u32 numBlocks;
	u16 ldTgtId;
	u8 isRead;
	__le16 devHandle;
	u8 pd_interface;
	u64 pdBlock;
	u8 fpOkForIo;
	u8 IoforUnevenSpan;
	u8 start_span;
	u8 do_fp_rlbypass;
	u64 start_row;
	u8 span_arm; /* span[7:5], arm[4:0] */
	u8 pd_after_lb;
	u16 r1_alt_dev_handle; /* raid 1/10 only */
	bool ra_capable;
	u8 data_arms;
};
struct MR_LD_TARGET_SYNC {
	u8 targetId;
	u8 reserved;
	__le16 seqNum;
};
/*
 * RAID Map descriptor Types.
 * Each element should uniquely identify one data structure in the RAID map
 */
enum MR_RAID_MAP_DESC_TYPE {
	/* MR_DEV_HANDLE_INFO data */
	RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0,
	/* target to Ld num Index map */
	RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1,
	/* MR_ARRAY_INFO data */
	RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2,
	/* MR_LD_SPAN_MAP data */
	RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3,
	RAID_MAP_DESC_TYPE_COUNT,
};
/*
 * This table defines the offset, size and num elements of each descriptor
 * type in the RAID Map buffer
 */
struct MR_RAID_MAP_DESC_TABLE {
	/* Raid map descriptor type */
	u32 raid_map_desc_type;
	/* Offset into the RAID map buffer where
	 * descriptor data is saved
	 */
	u32 raid_map_desc_offset;
	/* total size of the
	 * descriptor buffer
	 */
	u32 raid_map_desc_buffer_size;
	/* Number of elements contained in the
	 * descriptor buffer
	 */
	u32 raid_map_desc_elements;
};
/*
 * Dynamic Raid Map Structure.
 */
struct MR_FW_RAID_MAP_DYNAMIC {
	u32 raid_map_size; /* total size of RAID Map structure */
	u32 desc_table_offset; /* Offset of desc table into RAID map */
	u32 desc_table_size; /* Total Size of desc table */
	/* Total Number of elements in the desc table */
	u32 desc_table_num_elements;
	u64 reserved1;
	u32 reserved2[3]; /* future use */
	/* timeout value used by driver in FP IOs */
	u8 fp_pd_io_timeout_sec;
	u8 reserved3[3];
	/* when this seqNum increments, driver needs to
	 * release RMW buffers asap
	 */
	u32 rmw_fp_seq_num;
	u16 ld_count; /* count of lds. */
	u16 ar_count; /* count of arrays */
	u16 span_count; /* count of spans */
	u16 reserved4[3];
	/*
	 * The below structure of pointers is only to be used by the driver.
	 * This is added in the API to reduce the amount of code changes
	 * needed in the driver to support dynamic RAID map. Firmware should
	 * not update these pointers while preparing the raid map
	 */
	union {
		struct {
			struct MR_DEV_HANDLE_INFO *dev_hndl_info;
			u16 *ld_tgt_id_to_ld;
			struct MR_ARRAY_INFO *ar_map_info;
			struct MR_LD_SPAN_MAP *ld_span_map;
		};
		u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
	};
	/*
	 * RAID Map descriptor table defines the layout of data in the RAID Map.
	 * The size of the descriptor table itself could change.
	 */
	/* Variable Size descriptor Table. */
	struct MR_RAID_MAP_DESC_TABLE
		raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
	/* Variable Size buffer containing all data */
	u32 raid_map_desc_data[];
}; /* Dynamically sized RAID Map structure */
#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
#define MPI2_SGE_FLAGS_SHIFT (0x02)
#define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0)
#define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00)
#define IEEE_SGE_FLAGS_FORMAT_NVME (0x02)
#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
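/*
 * Illustrative sketch only (the helper name is not part of this header):
 * terminating an IEEE SGE list by setting the end-of-list flag on the
 * last MPI25_IEEE_SGE_CHAIN64 element, using the flag values above.
 */
static inline void mr_sgl_mark_end_of_list(struct MPI25_IEEE_SGE_CHAIN64 *sge)
{
	sge->Flags |= IEEE_SGE_FLAGS_END_OF_LIST;
}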
#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15
#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60
struct megasas_register_set;
struct megasas_instance;
union desc_word {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
struct megasas_cmd_fusion {
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
	dma_addr_t io_request_phys_addr;
	union MPI2_SGE_IO_UNION *sg_frame;
	dma_addr_t sg_frame_phys_addr;
	u8 *sense;
	dma_addr_t sense_phys_addr;
	struct list_head list;
	struct scsi_cmnd *scmd;
	struct megasas_instance *instance;
	u8 retry_for_fw_reset;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;
	/*
	 * Context for a MFI frame.
	 * Used to get the mfi cmd from list when a MFI cmd is completed
	 */
	u32 sync_cmd_idx;
	u32 index;
	u8 pd_r1_lb;
	struct completion done;
	u8 pd_interface;
	u16 r1_alt_dev_handle; /* raid 1/10 only */
	bool cmd_completed; /* raid 1/10 fp writes status holder */
};
struct LD_LOAD_BALANCE_INFO {
	u8 loadBalanceFlag;
	u8 reserved1;
	atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
	u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
};
/* SPAN_SET is info calculated from span info from Raid map per LD */
typedef struct _LD_SPAN_SET {
	u64 log_start_lba;
	u64 log_end_lba;
	u64 span_row_start;
	u64 span_row_end;
	u64 data_strip_start;
	u64 data_strip_end;
	u64 data_row_start;
	u64 data_row_end;
	u8 strip_offset[MAX_SPAN_DEPTH];
	u32 span_row_data_width;
	u32 diff;
	u32 reserved[2];
} LD_SPAN_SET, *PLD_SPAN_SET;
typedef struct LOG_BLOCK_SPAN_INFO {
	LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
} LD_SPAN_INFO, *PLD_SPAN_INFO;
struct MR_FW_RAID_MAP_ALL {
	struct MR_FW_RAID_MAP raidMap;
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
} __attribute__ ((packed));
struct MR_DRV_RAID_MAP {
	/* total size of this structure, including this field.
	 * This field will be manipulated by the driver for the ext raid map,
	 * else pick the value from the firmware raid map.
	 */
	__le32 totalSize;
	union {
		struct {
			__le32 maxLd;
			__le32 maxSpanDepth;
			__le32 maxRowSize;
			__le32 maxPdCount;
			__le32 maxArrays;
		} validationInfo;
		__le32 version[5];
	};
	/* timeout value used by driver in FP IOs */
	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];
	__le16 ldCount;
	__le16 arCount;
	__le16 spanCount;
	__le16 reserve3;
	struct MR_DEV_HANDLE_INFO
		devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
	u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
	struct MR_LD_SPAN_MAP ldSpanMap[];
};
/* Driver raid map size is the same as the ext raid map.
 * MR_DRV_RAID_MAP_ALL is created to sync with the old raid map,
 * mainly for code reuse.
 */
struct MR_DRV_RAID_MAP_ALL {
	struct MR_DRV_RAID_MAP raidMap;
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
} __packed;
struct MR_FW_RAID_MAP_EXT {
	/* Not used in new map */
	u32 reserved;
	union {
		struct {
			u32 maxLd;
			u32 maxSpanDepth;
			u32 maxRowSize;
			u32 maxPdCount;
			u32 maxArrays;
		} validationInfo;
		u32 version[5];
	};
	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];
	__le16 ldCount;
	__le16 arCount;
	__le16 spanCount;
	__le16 reserve3;
	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
	u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
};
/*
 * define MR_PD_CFG_SEQ structure for system PDs
 */
struct MR_PD_CFG_SEQ {
	u16 seqNum;
	u16 devHandle;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u8 reserved:7;
		u8 tmCapable:1;
#else
		u8 tmCapable:1;
		u8 reserved:7;
#endif
	} capability;
	u8 reserved;
	u16 pd_target_id;
} __packed;
struct MR_PD_CFG_SEQ_NUM_SYNC {
	__le32 size;
	__le32 count;
	struct MR_PD_CFG_SEQ seq[];
} __packed;
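/*
 * Illustrative sketch only (the helper name is not part of this header):
 * sizing a JBOD map buffer, i.e. the fixed MR_PD_CFG_SEQ_NUM_SYNC header
 * plus num_pds entries of its flexible seq[] array.
 */
static inline size_t mr_pd_seq_map_sz(u32 num_pds)
{
	return sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	       num_pds * sizeof(struct MR_PD_CFG_SEQ);
}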
/* stream detection */
struct STREAM_DETECT {
	u64 next_seq_lba; /* next LBA to match sequential access */
	struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
	struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
	u32 count_cmds_in_stream; /* count of host commands in this stream */
	u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
	u8 is_read; /* SCSI OpCode for this stream */
	u8 group_depth; /* total number of host commands in group */
	/* TRUE if cannot add any more commands to this group */
	bool group_flush;
	u8 reserved[7]; /* pad to 64-bit alignment */
};
struct LD_STREAM_DETECT {
	bool write_back; /* TRUE if WB, FALSE if WT */
	bool fp_write_enabled;
	bool members_ssds;
	bool fp_cache_bypass_capable;
	u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
	/* this is the array of stream detect structures (one per stream) */
	struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
};
struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
	u64 RDPQBaseAddress;
	u32 Reserved1;
	u32 Reserved2;
};
struct rdpq_alloc_detail {
	struct dma_pool *dma_pool_ptr;
	dma_addr_t pool_entry_phys;
	union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt;
};
struct fusion_context {
	struct megasas_cmd_fusion **cmd_list;
	dma_addr_t req_frames_desc_phys;
	u8 *req_frames_desc;
	struct dma_pool *io_request_frames_pool;
	dma_addr_t io_request_frames_phys;
	u8 *io_request_frames;
	struct dma_pool *sg_dma_pool;
	struct dma_pool *sense_dma_pool;
	u8 *sense;
	dma_addr_t sense_phys_addr;
	atomic_t busy_mq_poll[MAX_MSIX_QUEUES_FUSION];
	dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
	struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
	struct dma_pool *reply_frames_desc_pool;
	struct dma_pool *reply_frames_desc_pool_align;
	u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
	u32 reply_q_depth;
	u32 request_alloc_sz;
	u32 reply_alloc_sz;
	u32 io_frames_alloc_sz;
	struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
	dma_addr_t rdpq_phys;
	u16 max_sge_in_main_msg;
	u16 max_sge_in_chain;
	u8 chain_offset_io_request;
	u8 chain_offset_mfi_pthru;
	struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
	dma_addr_t ld_map_phys[2];
	/* Non dma-able memory. Driver local copy. */
	struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
	u32 max_map_sz;
	u32 current_map_sz;
	u32 old_map_sz;
	u32 new_map_sz;
	u32 drv_map_sz;
	u32 drv_map_pages;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT];
	dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];
	u8 fast_path_io;
	struct LD_LOAD_BALANCE_INFO *load_balance_info;
	u32 load_balance_info_pages;
	LD_SPAN_INFO *log_to_span;
	u32 log_to_span_pages;
	struct LD_STREAM_DETECT **stream_detect_by_ld;
	dma_addr_t ioc_init_request_phys;
	struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
	struct megasas_cmd *ioc_init_cmd;
	bool pcie_bw_limitation;
	bool r56_div_offload;
};
union desc_value {
	__le64 word;
	struct {
		__le32 low;
		__le32 high;
	} u;
};
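/*
 * Illustrative sketch only (the helper name is not part of this header):
 * pairing the low/high halves of union desc_value into the single 64-bit
 * word that a request descriptor is posted as.
 */
static inline __le64 mr_desc_value_from_halves(__le32 low, __le32 high)
{
	union desc_value d;

	d.u.low = low;
	d.u.high = high;
	return d.word;
}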
enum CMD_RET_VALUES {
	REFIRE_CMD = 1,
	COMPLETE_CMD = 2,
	RETURN_CMD = 3,
};
struct MR_SNAPDUMP_PROPERTIES {
	u8 offload_num;
	u8 max_num_supported;
	u8 cur_num_supported;
	u8 trigger_min_num_sec_before_ocr;
	u8 reserved[12];
};
struct megasas_debugfs_buffer {
	void *buf;
	u32 len;
};
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
int megasas_sync_map_info(struct megasas_instance *instance);
void megasas_release_fusion(struct megasas_instance *instance);
void megasas_reset_reply_desc(struct megasas_instance *instance);
int megasas_check_mpio_paths(struct megasas_instance *instance,
			     struct scsi_cmnd *scmd);
void megasas_fusion_ocr_wq(struct work_struct *work);
#endif /* _MEGARAID_SAS_FUSION_H_ */