/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_CSR_H
#define __NITROX_CSR_H

#include <asm/byteorder.h>
#include <linux/types.h>

/* EMU clusters */
#define NR_CLUSTERS 4

/* Maximum cores per cluster,
 * varies based on partname
 */
#define AE_CORES_PER_CLUSTER 20
#define SE_CORES_PER_CLUSTER 16

#define AE_MAX_CORES (AE_CORES_PER_CLUSTER * NR_CLUSTERS)
#define SE_MAX_CORES (SE_CORES_PER_CLUSTER * NR_CLUSTERS)
#define ZIP_MAX_CORES 5

/* BIST registers */
#define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000))
#define UCD_BIST_STATUS 0x12C0070
#define NPS_CORE_BIST_REG 0x10000E8
#define NPS_CORE_NPC_BIST_REG 0x1000128
#define NPS_PKT_SLC_BIST_REG 0x1040088
#define NPS_PKT_IN_BIST_REG 0x1040100
#define POM_BIST_REG 0x11C0100
#define BMI_BIST_REG 0x1140080
#define EFL_CORE_BIST_REGX(_i) (0x1240100 + ((_i) * 0x400))
#define EFL_TOP_BIST_STAT 0x1241090
#define BMO_BIST_REG 0x1180080
#define LBC_BIST_STATUS 0x1200020
#define PEM_BIST_STATUSX(_i) (0x1080468 | ((_i) << 18))

/* EMU registers */
#define EMU_SE_ENABLEX(_i) (0x1400000 + ((_i) * 0x40000))
#define EMU_AE_ENABLEX(_i) (0x1400008 + ((_i) * 0x40000))
#define EMU_WD_INT_ENA_W1SX(_i) (0x1402318 + ((_i) * 0x40000))
#define EMU_GE_INT_ENA_W1SX(_i) (0x1402518 + ((_i) * 0x40000))
#define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000))

/* UCD registers */
#define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000))
#define UCD_AE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0008 + ((_i) * 0x800))
#define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010
#define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20))
#define UCD_SE_CNTX(_i) (0x12C0040 + ((_i) * 0x1000))
#define UCD_AE_CNTX(_i) (0x12C0048 + ((_i) * 0x800))

/* AQM registers */
#define AQM_CTL 0x1300000
#define AQM_INT 0x1300008
#define AQM_DBELL_OVF_LO 0x1300010
#define AQM_DBELL_OVF_HI 0x1300018
#define AQM_DBELL_OVF_LO_W1S 0x1300020
#define AQM_DBELL_OVF_LO_ENA_W1C 0x1300028
#define AQM_DBELL_OVF_LO_ENA_W1S 0x1300030
#define AQM_DBELL_OVF_HI_W1S 0x1300038
#define AQM_DBELL_OVF_HI_ENA_W1C 0x1300040
#define AQM_DBELL_OVF_HI_ENA_W1S 0x1300048
#define AQM_DMA_RD_ERR_LO 0x1300050
#define AQM_DMA_RD_ERR_HI 0x1300058
#define AQM_DMA_RD_ERR_LO_W1S 0x1300060
#define AQM_DMA_RD_ERR_LO_ENA_W1C 0x1300068
#define AQM_DMA_RD_ERR_LO_ENA_W1S 0x1300070
#define AQM_DMA_RD_ERR_HI_W1S 0x1300078
#define AQM_DMA_RD_ERR_HI_ENA_W1C 0x1300080
#define AQM_DMA_RD_ERR_HI_ENA_W1S 0x1300088
#define AQM_EXEC_NA_LO 0x1300090
#define AQM_EXEC_NA_HI 0x1300098
#define AQM_EXEC_NA_LO_W1S 0x13000A0
#define AQM_EXEC_NA_LO_ENA_W1C 0x13000A8
#define AQM_EXEC_NA_LO_ENA_W1S 0x13000B0
#define AQM_EXEC_NA_HI_W1S 0x13000B8
#define AQM_EXEC_NA_HI_ENA_W1C 0x13000C0
#define AQM_EXEC_NA_HI_ENA_W1S 0x13000C8
#define AQM_EXEC_ERR_LO 0x13000D0
#define AQM_EXEC_ERR_HI 0x13000D8
#define AQM_EXEC_ERR_LO_W1S 0x13000E0
#define AQM_EXEC_ERR_LO_ENA_W1C 0x13000E8
#define AQM_EXEC_ERR_LO_ENA_W1S 0x13000F0
#define AQM_EXEC_ERR_HI_W1S 0x13000F8
#define AQM_EXEC_ERR_HI_ENA_W1C 0x1300100
#define AQM_EXEC_ERR_HI_ENA_W1S 0x1300108
#define AQM_ECC_INT 0x1300110
#define AQM_ECC_INT_W1S 0x1300118
#define AQM_ECC_INT_ENA_W1C 0x1300120
#define AQM_ECC_INT_ENA_W1S 0x1300128
#define AQM_ECC_CTL 0x1300130
#define AQM_BIST_STATUS 0x1300138
#define AQM_CMD_INF_THRX(x) (0x1300400 + ((x) * 0x8))
#define AQM_CMD_INFX(x) (0x1300800 + ((x) * 0x8))
#define AQM_GRP_EXECMSK_LOX(x) (0x1300C00 + ((x) * 0x10))
#define AQM_GRP_EXECMSK_HIX(x) (0x1300C08 + ((x) * 0x10))
#define AQM_ACTIVITY_STAT_LO 0x1300C80
#define AQM_ACTIVITY_STAT_HI 0x1300C88
#define AQM_Q_CMD_PROCX(x) (0x1301000 + ((x) * 0x8))
#define AQM_PERF_CTL_LO 0x1301400
#define AQM_PERF_CTL_HI 0x1301408
#define AQM_PERF_CNT 0x1301410
#define AQMQ_DRBLX(x) (0x20000 + ((x) * 0x40000))
#define AQMQ_QSZX(x) (0x20008 + ((x) * 0x40000))
#define AQMQ_BADRX(x) (0x20010 + ((x) * 0x40000))
#define AQMQ_NXT_CMDX(x) (0x20018 + ((x) * 0x40000))
#define AQMQ_CMD_CNTX(x) (0x20020 + ((x) * 0x40000))
#define AQMQ_CMP_THRX(x) (0x20028 + ((x) * 0x40000))
#define AQMQ_CMP_CNTX(x) (0x20030 + ((x) * 0x40000))
#define AQMQ_TIM_LDX(x) (0x20038 + ((x) * 0x40000))
#define AQMQ_TIMERX(x) (0x20040 + ((x) * 0x40000))
#define AQMQ_ENX(x) (0x20048 + ((x) * 0x40000))
#define AQMQ_ACTIVITY_STATX(x) (0x20050 + ((x) * 0x40000))
#define AQM_VF_CMP_STATX(x) (0x28000 + ((x) * 0x40000))

/* NPS core registers */
#define NPS_CORE_GBL_VFCFG 0x1000000
#define NPS_CORE_CONTROL 0x1000008
#define NPS_CORE_INT_ACTIVE 0x1000080
#define NPS_CORE_INT 0x10000A0
#define NPS_CORE_INT_ENA_W1S 0x10000B8
#define NPS_STATS_PKT_DMA_RD_CNT 0x1000180
#define NPS_STATS_PKT_DMA_WR_CNT 0x1000190

/* NPS packet registers */
#define NPS_PKT_INT 0x1040018
#define NPS_PKT_MBOX_INT_LO 0x1040020
#define NPS_PKT_MBOX_INT_LO_ENA_W1C 0x1040030
#define NPS_PKT_MBOX_INT_LO_ENA_W1S 0x1040038
#define NPS_PKT_MBOX_INT_HI 0x1040040
#define NPS_PKT_MBOX_INT_HI_ENA_W1C 0x1040050
#define NPS_PKT_MBOX_INT_HI_ENA_W1S 0x1040058
#define NPS_PKT_IN_RERR_HI 0x1040108
#define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120
#define NPS_PKT_IN_RERR_LO 0x1040128
#define NPS_PKT_IN_RERR_LO_ENA_W1S 0x1040140
#define NPS_PKT_IN_ERR_TYPE 0x1040148
#define NPS_PKT_IN_ERR_TYPE_ENA_W1S 0x1040160
#define NPS_PKT_IN_INSTR_CTLX(_i) (0x10060 + ((_i) * 0x40000))
#define NPS_PKT_IN_INSTR_BADDRX(_i) (0x10068 + ((_i) * 0x40000))
#define NPS_PKT_IN_INSTR_RSIZEX(_i) (0x10070 + ((_i) * 0x40000))
#define NPS_PKT_IN_DONE_CNTSX(_i) (0x10080 + ((_i) * 0x40000))
#define NPS_PKT_IN_INSTR_BAOFF_DBELLX(_i) (0x10078 + ((_i) * 0x40000))
#define NPS_PKT_IN_INT_LEVELSX(_i) (0x10088 + ((_i) * 0x40000))
#define NPS_PKT_SLC_RERR_HI 0x1040208
#define NPS_PKT_SLC_RERR_HI_ENA_W1S 0x1040220
#define NPS_PKT_SLC_RERR_LO 0x1040228
#define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240
#define NPS_PKT_SLC_ERR_TYPE 0x1040248
#define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260
/* Mailbox PF->VF PF Accessible Data registers */
#define NPS_PKT_MBOX_PF_VF_PFDATAX(_i) (0x1040800 + ((_i) * 0x8))
#define NPS_PKT_MBOX_VF_PF_PFDATAX(_i) (0x1040C00 + ((_i) * 0x8))
#define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000))
#define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000))
#define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000))

/* POM registers */
#define POM_INT_ENA_W1S 0x11C0018
#define POM_GRP_EXECMASKX(_i) (0x11C1100 | ((_i) * 8))
#define POM_INT 0x11C0000
#define POM_PERF_CTL 0x11CC400

/* BMI registers */
#define BMI_INT 0x1140000
#define BMI_CTL 0x1140020
#define BMI_INT_ENA_W1S 0x1140018
#define BMI_NPS_PKT_CNT 0x1140070

/* EFL registers */
#define EFL_CORE_INT_ENA_W1SX(_i) (0x1240018 + ((_i) * 0x400))
#define EFL_CORE_VF_ERR_INT0X(_i) (0x1240050 + ((_i) * 0x400))
#define EFL_CORE_VF_ERR_INT0_ENA_W1SX(_i) (0x1240068 + ((_i) * 0x400))
#define EFL_CORE_VF_ERR_INT1X(_i) (0x1240070 + ((_i) * 0x400))
#define EFL_CORE_VF_ERR_INT1_ENA_W1SX(_i) (0x1240088 + ((_i) * 0x400))
#define EFL_CORE_SE_ERR_INTX(_i) (0x12400A0 + ((_i) * 0x400))
#define EFL_RNM_CTL_STATUS 0x1241800
#define EFL_CORE_INTX(_i) (0x1240000 + ((_i) * 0x400))

/* BMO registers */
#define BMO_CTL2 0x1180028
#define BMO_NPS_SLC_PKT_CNT 0x1180078

/* LBC registers */
#define LBC_INT 0x1200000
#define LBC_INVAL_CTL 0x1201010
#define LBC_PLM_VF1_64_INT 0x1202008
#define LBC_INVAL_STATUS 0x1202010
#define LBC_INT_ENA_W1S 0x1203000
#define LBC_PLM_VF1_64_INT_ENA_W1S 0x1205008
#define LBC_PLM_VF65_128_INT 0x1206008
#define LBC_ELM_VF1_64_INT 0x1208000
#define LBC_PLM_VF65_128_INT_ENA_W1S 0x1209008
#define LBC_ELM_VF1_64_INT_ENA_W1S 0x120B000
#define LBC_ELM_VF65_128_INT 0x120C000
#define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000

#define RST_BOOT 0x10C1600
#define FUS_DAT1 0x10C1408

/* PEM registers */
#define PEM0_INT 0x1080428

/**
 * struct ucd_core_eid_ucode_block_num - Core Eid to Ucode Blk Mapping Registers
 * @ucode_len: Ucode length identifier 32KB or 64KB
 * @ucode_blk: Ucode Block Number
 */
union ucd_core_eid_ucode_block_num {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_4_63 : 60;
		u64 ucode_len : 1;
		u64 ucode_blk : 3;
#else
		u64 ucode_blk : 3;
		u64 ucode_len : 1;
		u64 raz_4_63 : 60;
#endif
	};
};
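
/*
 * Usage note (illustrative sketch, not part of the hardware definition):
 * each union in this file overlays named bit fields on the raw 64-bit CSR
 * value, so a register is typically read into .value and then manipulated
 * through the fields. The accessor names nitrox_read_csr()/nitrox_write_csr()
 * and the 'ndev'/'blknum' identifiers are assumptions for the example only.
 *
 *	union ucd_core_eid_ucode_block_num eid;
 *
 *	eid.value = nitrox_read_csr(ndev, UCD_SE_EID_UCODE_BLOCK_NUMX(0));
 *	eid.ucode_blk = blknum;		// select the microcode block
 *	nitrox_write_csr(ndev, UCD_SE_EID_UCODE_BLOCK_NUMX(0), eid.value);
 */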

/**
 * struct aqm_grp_execmsk_lo - Available AE engines for the group
 * @exec_0_to_39: AE engines 0 to 39 status
 */
union aqm_grp_execmsk_lo {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_40_63 : 24;
		u64 exec_0_to_39 : 40;
#else
		u64 exec_0_to_39 : 40;
		u64 raz_40_63 : 24;
#endif
	};
};

/**
 * struct aqm_grp_execmsk_hi - Available AE engines for the group
 * @exec_40_to_79: AE engines 40 to 79 status
 */
union aqm_grp_execmsk_hi {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_40_63 : 24;
		u64 exec_40_to_79 : 40;
#else
		u64 exec_40_to_79 : 40;
		u64 raz_40_63 : 24;
#endif
	};
};

/**
 * struct aqmq_drbl - AQM Queue Doorbell Counter Registers
 * @dbell_count: Doorbell Counter
 */
union aqmq_drbl {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_32_63 : 32;
		u64 dbell_count : 32;
#else
		u64 dbell_count : 32;
		u64 raz_32_63 : 32;
#endif
	};
};

/**
 * struct aqmq_qsz - AQM Queue Host Queue Size Registers
 * @host_queue_size: Size of the host ring, in number of
 * 'aqmq_command_s' commands.
 */
union aqmq_qsz {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_32_63 : 32;
		u64 host_queue_size : 32;
#else
		u64 host_queue_size : 32;
		u64 raz_32_63 : 32;
#endif
	};
};

/**
 * struct aqmq_cmp_thr - AQM Queue Commands Completed Threshold Registers
 * @commands_completed_threshold: Count of 'aqmq_command_s' commands executed
 * by AE engines at which a completion interrupt is asserted.
 */
union aqmq_cmp_thr {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_32_63 : 32;
		u64 commands_completed_threshold : 32;
#else
		u64 commands_completed_threshold : 32;
		u64 raz_32_63 : 32;
#endif
	};
};

/**
 * struct aqmq_cmp_cnt - AQM Queue Commands Completed Count Registers
 * @resend: Bit to request a completion interrupt resend.
 * @completion_status: Command completion status of the ring.
 * @commands_completed_count: Count of 'aqmq_command_s' commands executed by
 * AE engines.
 */
union aqmq_cmp_cnt {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_34_63 : 30;
		u64 resend : 1;
		u64 completion_status : 1;
		u64 commands_completed_count : 32;
#else
		u64 commands_completed_count : 32;
		u64 completion_status : 1;
		u64 resend : 1;
		u64 raz_34_63 : 30;
#endif
	};
};
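
/*
 * Illustrative sketch (CSR accessor names and the consumer helper are
 * assumptions for the example): a completion handler can read the number of
 * completed 'aqmq_command_s' commands and, if it wants the completion
 * interrupt to be signalled again, write the register with only [RESEND]
 * set, as described above.
 *
 *	union aqmq_cmp_cnt cmp, resend;
 *
 *	cmp.value = nitrox_read_csr(ndev, AQMQ_CMP_CNTX(qno));
 *	consume_aqm_completions(cmp.commands_completed_count);	// hypothetical
 *	resend.value = 0;
 *	resend.resend = 1;		// request completion interrupt resend
 *	nitrox_write_csr(ndev, AQMQ_CMP_CNTX(qno), resend.value);
 */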

/**
 * struct aqmq_en - AQM Queue Enable Registers
 * @queue_enable: 1 = AQMQ is enabled, 0 = AQMQ is disabled
 */
union aqmq_en {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_1_63 : 63;
		u64 queue_enable : 1;
#else
		u64 queue_enable : 1;
		u64 raz_1_63 : 63;
#endif
	};
};

/**
 * struct aqmq_activity_stat - AQM Queue Activity Status Registers
 * @queue_active: 1 = AQMQ is active, 0 = AQMQ is quiescent
 */
union aqmq_activity_stat {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_1_63 : 63;
		u64 queue_active : 1;
#else
		u64 queue_active : 1;
		u64 raz_1_63 : 63;
#endif
	};
};
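
/*
 * Illustrative sketch (accessor names assumed for the example): a queue is
 * controlled through AQMQ_ENX(); after clearing [QUEUE_ENABLE], software can
 * poll AQMQ_ACTIVITY_STATX() until [QUEUE_ACTIVE] reads 0, i.e. the queue
 * has gone quiescent.
 *
 *	union aqmq_en en;
 *	union aqmq_activity_stat stat;
 *
 *	en.value = 0;			// clear QUEUE_ENABLE to disable
 *	nitrox_write_csr(ndev, AQMQ_ENX(qno), en.value);
 *	do {
 *		stat.value = nitrox_read_csr(ndev, AQMQ_ACTIVITY_STATX(qno));
 *	} while (stat.queue_active);	// wait until quiescent
 */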

/**
 * struct emu_fuse_map - EMU Fuse Map Registers
 * @ae_fuse: Fuse settings for AE 19..0
 * @se_fuse: Fuse settings for SE 15..0
 *
 * A set bit indicates the unit is fuse disabled.
 */
union emu_fuse_map {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 valid : 1;
		u64 raz_52_62 : 11;
		u64 ae_fuse : 20;
		u64 raz_16_31 : 16;
		u64 se_fuse : 16;
#else
		u64 se_fuse : 16;
		u64 raz_16_31 : 16;
		u64 ae_fuse : 20;
		u64 raz_52_62 : 11;
		u64 valid : 1;
#endif
	} s;
};
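
/*
 * Illustrative sketch: since a set fuse bit means the engine is disabled,
 * the number of usable SE/AE cores in cluster 'i' can be derived from the
 * fuse map. hweight16()/hweight32() are the kernel's population-count
 * helpers; the CSR accessor name is an assumption for the example.
 *
 *	union emu_fuse_map fuse;
 *
 *	fuse.value = nitrox_read_csr(ndev, EMU_FUSE_MAPX(i));
 *	if (fuse.s.valid) {
 *		unsigned int se_cores = SE_CORES_PER_CLUSTER -
 *					hweight16(fuse.s.se_fuse);
 *		unsigned int ae_cores = AE_CORES_PER_CLUSTER -
 *					hweight32(fuse.s.ae_fuse);
 *	}
 */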

/**
 * struct emu_se_enable - Symmetric Engine Enable Registers
 * @enable: Individual enables for each of the cluster's
 * 16 symmetric engines.
 */
union emu_se_enable {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz : 48;
		u64 enable : 16;
#else
		u64 enable : 16;
		u64 raz : 48;
#endif
	} s;
};

/**
 * struct emu_ae_enable - EMU Asymmetric engines.
 * @enable: Individual enables for each of the cluster's
 * 20 asymmetric engines.
 */
union emu_ae_enable {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz : 44;
		u64 enable : 20;
#else
		u64 enable : 20;
		u64 raz : 44;
#endif
	} s;
};

/**
 * struct emu_wd_int_ena_w1s - EMU Interrupt Enable Registers
 * @ae_wd: Reads or sets enable for EMU(0..3)_WD_INT[AE_WD]
 * @se_wd: Reads or sets enable for EMU(0..3)_WD_INT[SE_WD]
 */
union emu_wd_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 12;
		u64 ae_wd : 20;
		u64 raz1 : 16;
		u64 se_wd : 16;
#else
		u64 se_wd : 16;
		u64 raz1 : 16;
		u64 ae_wd : 20;
		u64 raz2 : 12;
#endif
	} s;
};

/**
 * struct emu_ge_int_ena_w1s - EMU Interrupt Enable set registers
 * @ae_ge: Reads or sets enable for EMU(0..3)_GE_INT[AE_GE]
 * @se_ge: Reads or sets enable for EMU(0..3)_GE_INT[SE_GE]
 */
union emu_ge_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_52_63 : 12;
		u64 ae_ge : 20;
		u64 raz_16_31 : 16;
		u64 se_ge : 16;
#else
		u64 se_ge : 16;
		u64 raz_16_31 : 16;
		u64 ae_ge : 20;
		u64 raz_52_63 : 12;
#endif
	} s;
};

/**
 * struct nps_pkt_slc_ctl - Solicited Packet Out Control Registers
 * @rh: Indicates whether to remove or include the response header
 * 1 = Include, 0 = Remove
 * @z: If set, 8 trailing 0x00 bytes will be added to the end of the
 * outgoing packet.
 * @enb: Enable for this port.
 */
union nps_pkt_slc_ctl {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 raz : 61;
		u64 rh : 1;
		u64 z : 1;
		u64 enb : 1;
#else
		u64 enb : 1;
		u64 z : 1;
		u64 rh : 1;
		u64 raz : 61;
#endif
	} s;
};

/**
 * struct nps_pkt_slc_cnts - Solicited Packet Out Count Registers
 * @slc_int: Returns a 1 when:
 * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
 * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET].
 * To clear the bit, the CNTS register must be written to clear.
 * @in_int: Returns a 1 when:
 * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT].
 * To clear the bit, the DONE_CNTS register must be written to clear.
 * @mbox_int: Returns a 1 when:
 * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. To clear the bit,
 * write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] with 1.
 * @timer: Timer, incremented every 2048 coprocessor clock cycles
 * when [CNT] is not zero. The hardware clears both [TIMER] and
 * [INT] when [CNT] goes to 0.
 * @cnt: Packet counter. Hardware adds to [CNT] as it sends packets out.
 * On a write to this CSR, hardware subtracts the amount written to the
 * [CNT] field from [CNT].
 */
union nps_pkt_slc_cnts {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 slc_int : 1;
		u64 uns_int : 1;
		u64 in_int : 1;
		u64 mbox_int : 1;
		u64 resend : 1;
		u64 raz : 5;
		u64 timer : 22;
		u64 cnt : 32;
#else
		u64 cnt : 32;
		u64 timer : 22;
		u64 raz : 5;
		u64 resend : 1;
		u64 mbox_int : 1;
		u64 in_int : 1;
		u64 uns_int : 1;
		u64 slc_int : 1;
#endif
	} s;
};
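
/*
 * Illustrative sketch (accessor names and the consumer helper are assumed):
 * because hardware subtracts whatever is written to [CNT], a completion path
 * can read the count, process that many responses, and write the same number
 * back so the interrupt condition in NPS_PKT_SLC(i)_INT_LEVELS can clear.
 *
 *	union nps_pkt_slc_cnts cnts, ack;
 *
 *	cnts.value = nitrox_read_csr(ndev, NPS_PKT_SLC_CNTSX(port));
 *	process_responses(cnts.s.cnt);		// hypothetical consumer
 *	ack.value = 0;
 *	ack.s.cnt = cnts.s.cnt;			// hardware subtracts this amount
 *	nitrox_write_csr(ndev, NPS_PKT_SLC_CNTSX(port), ack.value);
 */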

/**
 * struct nps_pkt_slc_int_levels - Solicited Packet Out Interrupt Levels
 * Registers.
 * @bmode: Determines whether NPS_PKT_SLC_CNTS[CNT] is a byte or
 * packet counter.
 * @timet: Output port counter time interrupt threshold.
 * @cnt: Output port counter interrupt threshold.
 */
union nps_pkt_slc_int_levels {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 bmode : 1;
		u64 raz : 9;
		u64 timet : 22;
		u64 cnt : 32;
#else
		u64 cnt : 32;
		u64 timet : 22;
		u64 raz : 9;
		u64 bmode : 1;
#endif
	} s;
};

/**
 * struct nps_pkt_int - NPS Packet Interrupt Register
 * @in_err: Set when any NPS_PKT_IN_RERR_HI/LO bit and the
 * corresponding NPS_PKT_IN_RERR_*_ENA_* bit are both set.
 * @uns_err: Set when any NPS_PKT_UNS_RERR_HI/LO bit and the
 * corresponding NPS_PKT_UNS_RERR_*_ENA_* bit are both set.
 * @slc_err: Set when any NPS_PKT_SLC_RERR_HI/LO bit and the
 * corresponding NPS_PKT_SLC_RERR_*_ENA_* bit are both set.
 */
union nps_pkt_int {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 raz : 54;
		u64 uns_wto : 1;
		u64 in_err : 1;
		u64 uns_err : 1;
		u64 slc_err : 1;
		u64 in_dbe : 1;
		u64 in_sbe : 1;
		u64 uns_dbe : 1;
		u64 uns_sbe : 1;
		u64 slc_dbe : 1;
		u64 slc_sbe : 1;
#else
		u64 slc_sbe : 1;
		u64 slc_dbe : 1;
		u64 uns_sbe : 1;
		u64 uns_dbe : 1;
		u64 in_sbe : 1;
		u64 in_dbe : 1;
		u64 slc_err : 1;
		u64 uns_err : 1;
		u64 in_err : 1;
		u64 uns_wto : 1;
		u64 raz : 54;
#endif
	} s;
};

/**
 * struct nps_pkt_in_done_cnts - Input instruction ring counts registers
 * @slc_int: Returns a 1 when:
 * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
 * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET].
 * To clear the bit, the CNTS register must be
 * written to clear the underlying condition.
 * @uns_int: Returns a 1 when:
 * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT], or
 * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET].
 * To clear the bit, the CNTS register must be
 * written to clear the underlying condition.
 * @in_int: Returns a 1 when:
 * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT].
 * To clear the bit, the DONE_CNTS register
 * must be written to clear the underlying condition.
 * @mbox_int: Returns a 1 when:
 * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set.
 * To clear the bit, write NPS_PKT_MBOX_PF_VF(i)_INT[INTR]
 * with 1.
 * @resend: A write of 1 will resend an MSI-X interrupt message if any
 * of the following conditions are true for this ring "i":
 * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT],
 * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET],
 * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT],
 * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET],
 * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT],
 * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set.
 * @cnt: Packet counter. Hardware adds to [CNT] as it reads
 * packets. On a write to this CSR, hardware subtracts the
 * amount written to the [CNT] field from [CNT], which will
 * clear PKT_IN(i)_INT_STATUS[INTR] if [CNT] becomes <=
 * NPS_PKT_IN(i)_INT_LEVELS[CNT]. This register should be
 * cleared before enabling a ring by reading the current
 * value and writing it back.
 */
union nps_pkt_in_done_cnts {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 slc_int : 1;
		u64 uns_int : 1;
		u64 in_int : 1;
		u64 mbox_int : 1;
		u64 resend : 1;
		u64 raz : 27;
		u64 cnt : 32;
#else
		u64 cnt : 32;
		u64 raz : 27;
		u64 resend : 1;
		u64 mbox_int : 1;
		u64 in_int : 1;
		u64 uns_int : 1;
		u64 slc_int : 1;
#endif
	} s;
};
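
/*
 * Illustrative sketch of the clearing step described above for @cnt:
 * before a ring is enabled, the done count is zeroed by reading the
 * register and writing the observed value back, which hardware subtracts
 * from [CNT]. The accessor names are assumptions for the example.
 *
 *	union nps_pkt_in_done_cnts done;
 *
 *	done.value = nitrox_read_csr(ndev, NPS_PKT_IN_DONE_CNTSX(ring));
 *	nitrox_write_csr(ndev, NPS_PKT_IN_DONE_CNTSX(ring), done.value);
 */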

/**
 * struct nps_pkt_in_instr_ctl - Input Instruction Ring Control Registers.
 * @is64b: If 1, the ring uses 64-byte instructions. If 0, the
 * ring uses 32-byte instructions.
 * @enb: Enable for the input ring.
 */
union nps_pkt_in_instr_ctl {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz : 62;
		u64 is64b : 1;
		u64 enb : 1;
#else
		u64 enb : 1;
		u64 is64b : 1;
		u64 raz : 62;
#endif
	} s;
};

/**
 * struct nps_pkt_in_instr_rsize - Input instruction ring size registers
 * @rsize: Ring size (number of instructions)
 */
union nps_pkt_in_instr_rsize {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz : 32;
		u64 rsize : 32;
#else
		u64 rsize : 32;
		u64 raz : 32;
#endif
	} s;
};

/**
 * struct nps_pkt_in_instr_baoff_dbell - Input instruction ring
 * base address offset and doorbell registers
 * @aoff: Address offset. The offset from NPS_PKT_IN_INSTR_BADDR
 * where the next pointer is read.
 * @dbell: Pointer list doorbell count. Write operations to this field
 * increment the present value; read operations return the
 * present value.
 */
union nps_pkt_in_instr_baoff_dbell {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 aoff : 32;
		u64 dbell : 32;
#else
		u64 dbell : 32;
		u64 aoff : 32;
#endif
	} s;
};
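
/*
 * Illustrative sketch (accessor name assumed; 'posted' stands for the number
 * of newly queued instructions): after new instructions have been written
 * into the ring, software rings the doorbell by writing that count to
 * [DBELL], which, per the description above, increments the present value.
 *
 *	union nps_pkt_in_instr_baoff_dbell dbell;
 *
 *	dbell.value = 0;
 *	dbell.s.dbell = posted;
 *	nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring), dbell.value);
 */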

/**
 * struct nps_core_int_ena_w1s - NPS core interrupt enable set register
 * @host_nps_wr_err: Reads or sets enable for
 * NPS_CORE_INT[HOST_NPS_WR_ERR].
 * @npco_dma_malform: Reads or sets enable for
 * NPS_CORE_INT[NPCO_DMA_MALFORM].
 * @exec_wr_timeout: Reads or sets enable for
 * NPS_CORE_INT[EXEC_WR_TIMEOUT].
 * @host_wr_timeout: Reads or sets enable for
 * NPS_CORE_INT[HOST_WR_TIMEOUT].
 * @host_wr_err: Reads or sets enable for
 * NPS_CORE_INT[HOST_WR_ERR].
 */
union nps_core_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz4 : 55;
		u64 host_nps_wr_err : 1;
		u64 npco_dma_malform : 1;
		u64 exec_wr_timeout : 1;
		u64 host_wr_timeout : 1;
		u64 host_wr_err : 1;
		u64 raz3 : 1;
		u64 raz2 : 1;
		u64 raz1 : 1;
		u64 raz0 : 1;
#else
		u64 raz0 : 1;
		u64 raz1 : 1;
		u64 raz2 : 1;
		u64 raz3 : 1;
		u64 host_wr_err : 1;
		u64 host_wr_timeout : 1;
		u64 exec_wr_timeout : 1;
		u64 npco_dma_malform : 1;
		u64 host_nps_wr_err : 1;
		u64 raz4 : 55;
#endif
	} s;
};

/**
 * struct nps_core_gbl_vfcfg - Global VF Configuration Register.
 * @ilk_disable: When set, this bit indicates that the ILK interface has
 * been disabled.
 * @obaf: BMO allocation control
 * 0 = allocate per queue
 * 1 = allocate per VF
 * @ibaf: BMI allocation control
 * 0 = allocate per queue
 * 1 = allocate per VF
 * @zaf: ZIP allocation control
 * 0 = allocate per queue
 * 1 = allocate per VF
 * @aeaf: AE allocation control
 * 0 = allocate per queue
 * 1 = allocate per VF
 * @seaf: SE allocation control
 * 0 = allocate per queue
 * 1 = allocate per VF
 * @cfg: VF/PF mode.
 */
union nps_core_gbl_vfcfg {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz : 55;
		u64 ilk_disable : 1;
		u64 obaf : 1;
		u64 ibaf : 1;
		u64 zaf : 1;
		u64 aeaf : 1;
		u64 seaf : 1;
		u64 cfg : 3;
#else
		u64 cfg : 3;
		u64 seaf : 1;
		u64 aeaf : 1;
		u64 zaf : 1;
		u64 ibaf : 1;
		u64 obaf : 1;
		u64 ilk_disable : 1;
		u64 raz : 55;
#endif
	} s;
};
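
/*
 * Illustrative sketch (accessor names and the 'mode' value are assumptions
 * for the example): the PF driver programs the VF/PF mode through [CFG]
 * with a read-modify-write of the register.
 *
 *	union nps_core_gbl_vfcfg vfcfg;
 *
 *	vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
 *	vfcfg.s.cfg = mode;		// desired VF/PF mode encoding
 *	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
 */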

/**
 * struct nps_core_int_active - NPS Core Interrupt Active Register
 * @resend: Resend MSI-X interrupt if there are still interrupts to handle.
 * Software can set this bit and then exit the ISR.
 * @ocla: Set when any OCLA(0)_INT and corresponding OCLA(0)_INT_ENA_W1C
 * bit are set
 * @mbox: Set when any NPS_PKT_MBOX_INT_LO/HI and corresponding
 * NPS_PKT_MBOX_INT_LO_ENA_W1C/HI_ENA_W1C bits are set
 * @emu: bit i is set in [EMU] when any EMU(i)_INT bit is set
 * @bmo: Set when any BMO_INT bit is set
 * @bmi: Set when any BMI_INT bit is set or when any non-RO
 * BMI_INT and corresponding BMI_INT_ENA_W1C bits are both set
 * @aqm: Set when any AQM_INT bit is set
 * @zqm: Set when any ZQM_INT bit is set
 * @efl: Set when any EFL_INT RO bit is set or when any non-RO EFL_INT
 * and corresponding EFL_INT_ENA_W1C bits are both set
 * @ilk: Set when any ILK_INT bit is set
 * @lbc: Set when any LBC_INT RO bit is set or when any non-RO LBC_INT
 * and corresponding LBC_INT_ENA_W1C bits are both set
 * @pem: Set when any PEM(0)_INT RO bit is set or when any non-RO
 * PEM(0)_INT and corresponding PEM(0)_INT_ENA_W1C bit are both set
 * @ucd: Set when any UCD_INT bit is set
 * @zctl: Set when any ZIP_INT RO bit is set or when any non-RO ZIP_INT
 * and corresponding ZIP_INT_ENA_W1C bits are both set
 * @lbm: Set when any LBM_INT bit is set
 * @nps_pkt: Set when any NPS_PKT_INT bit is set
 * @nps_core: Set when any NPS_CORE_INT RO bit is set or when non-RO
 * NPS_CORE_INT and corresponding NPS_CORE_INT_ENA_W1C bits are both set
 */
union nps_core_int_active {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 resend : 1;
		u64 raz : 43;
		u64 ocla : 1;
		u64 mbox : 1;
		u64 emu : 4;
		u64 bmo : 1;
		u64 bmi : 1;
		u64 aqm : 1;
		u64 zqm : 1;
		u64 efl : 1;
		u64 ilk : 1;
		u64 lbc : 1;
		u64 pem : 1;
		u64 pom : 1;
		u64 ucd : 1;
		u64 zctl : 1;
		u64 lbm : 1;
		u64 nps_pkt : 1;
		u64 nps_core : 1;
#else
		u64 nps_core : 1;
		u64 nps_pkt : 1;
		u64 lbm : 1;
		u64 zctl : 1;
		u64 ucd : 1;
		u64 pom : 1;
		u64 pem : 1;
		u64 lbc : 1;
		u64 ilk : 1;
		u64 efl : 1;
		u64 zqm : 1;
		u64 aqm : 1;
		u64 bmi : 1;
		u64 bmo : 1;
		u64 emu : 4;
		u64 mbox : 1;
		u64 ocla : 1;
		u64 raz : 43;
		u64 resend : 1;
#endif
	} s;
};
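
/*
 * Illustrative sketch of the [RESEND] usage described above (accessor names
 * and the per-block handler are assumptions for the example): an interrupt
 * handler can inspect the per-block summary bits and, if remaining work
 * should be signalled again, write a value with only [RESEND] set before
 * returning.
 *
 *	union nps_core_int_active core_int, resend;
 *
 *	core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
 *	if (core_int.s.nps_pkt)
 *		handle_pkt_interrupts(ndev);	// hypothetical helper
 *	resend.value = 0;
 *	resend.s.resend = 1;			// ask for the MSI-X to be resent
 *	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, resend.value);
 */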

/**
 * struct efl_core_int - EFL Interrupt Registers
 * @epci_decode_err: EPCI decoded a transaction that was unknown.
 * This error should only occur when there is a microcode/SE error
 * and should be considered fatal.
 * @ae_err: An AE uncorrectable error occurred.
 * See EFL_CORE(0..3)_AE_ERR_INT
 * @se_err: An SE uncorrectable error occurred.
 * See EFL_CORE(0..3)_SE_ERR_INT
 * @dbe: Double-bit error occurred in EFL
 * @sbe: Single-bit error occurred in EFL
 * @d_left: Asserted when new POM-Header-BMI-data is
 * being sent to an Exec, and that Exec has not read all BMI
 * data associated with the previous POM header
 * @len_ovr: Asserted when an Exec-Read is issued that is more than
 * 14 greater in length than the BMI data left to be read
 */
union efl_core_int {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz : 57;
		u64 epci_decode_err : 1;
		u64 ae_err : 1;
		u64 se_err : 1;
		u64 dbe : 1;
		u64 sbe : 1;
		u64 d_left : 1;
		u64 len_ovr : 1;
#else
		u64 len_ovr : 1;
		u64 d_left : 1;
		u64 sbe : 1;
		u64 dbe : 1;
		u64 se_err : 1;
		u64 ae_err : 1;
		u64 epci_decode_err : 1;
		u64 raz : 57;
#endif
	} s;
};

/**
 * struct efl_core_int_ena_w1s - EFL core interrupt enable set register
 * @epci_decode_err: Reads or sets enable for
 * EFL_CORE(0..3)_INT[EPCI_DECODE_ERR].
 * @d_left: Reads or sets enable for
 * EFL_CORE(0..3)_INT[D_LEFT].
 * @len_ovr: Reads or sets enable for
 * EFL_CORE(0..3)_INT[LEN_OVR].
 */
union efl_core_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_7_63 : 57;
		u64 epci_decode_err : 1;
		u64 raz_2_5 : 4;
		u64 d_left : 1;
		u64 len_ovr : 1;
#else
		u64 len_ovr : 1;
		u64 d_left : 1;
		u64 raz_2_5 : 4;
		u64 epci_decode_err : 1;
		u64 raz_7_63 : 57;
#endif
	} s;
};

/**
 * struct efl_rnm_ctl_status - RNM Control and Status Register
 * @ent_sel: Select input to RNM FIFO
 * @exp_ent: Exported entropy enable for random number generator
 * @rng_rst: Reset to RNG. Setting this bit to 1 cancels the generation
 * of the current random number.
 * @rnm_rst: Reset the RNM. Setting this bit to 1 clears all stored numbers
 * in the random number memory.
 * @rng_en: Enables the output of the RNG.
 * @ent_en: Entropy enable for random number generator.
 */
union efl_rnm_ctl_status {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_9_63 : 55;
		u64 ent_sel : 4;
		u64 exp_ent : 1;
		u64 rng_rst : 1;
		u64 rnm_rst : 1;
		u64 rng_en : 1;
		u64 ent_en : 1;
#else
		u64 ent_en : 1;
		u64 rng_en : 1;
		u64 rnm_rst : 1;
		u64 rng_rst : 1;
		u64 exp_ent : 1;
		u64 ent_sel : 4;
		u64 raz_9_63 : 55;
#endif
	} s;
};
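
/*
 * Illustrative sketch (accessor names assumed): bringing up the random
 * number block amounts to a read-modify-write of EFL_RNM_CTL_STATUS that
 * turns on the entropy input and the RNG output.
 *
 *	union efl_rnm_ctl_status rnm_ctl;
 *
 *	rnm_ctl.value = nitrox_read_csr(ndev, EFL_RNM_CTL_STATUS);
 *	rnm_ctl.s.ent_en = 1;		// entropy enable
 *	rnm_ctl.s.rng_en = 1;		// enable RNG output
 *	nitrox_write_csr(ndev, EFL_RNM_CTL_STATUS, rnm_ctl.value);
 */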

/**
 * struct bmi_ctl - BMI control register
 * @ilk_hdrq_thrsh: Maximum number of header queue locations
 * that ILK packets may consume. When the threshold is
 * exceeded ILK_XOFF is sent to the BMI_X2P_ARB.
 * @nps_hdrq_thrsh: Maximum number of header queue locations
 * that NPS packets may consume. When the threshold is
 * exceeded NPS_XOFF is sent to the BMI_X2P_ARB.
 * @totl_hdrq_thrsh: Maximum number of header queue locations
 * that the sum of ILK and NPS packets may consume.
 * @ilk_free_thrsh: Maximum number of buffers that ILK packet
 * flows may consume before ILK_XOFF is sent to the BMI_X2P_ARB.
 * @nps_free_thrsh: Maximum number of buffers that NPS packet
 * flows may consume before NPS_XOFF is sent to the BMI_X2P_ARB.
 * @totl_free_thrsh: Maximum number of buffers that both ILK and NPS
 * packet flows may consume before both NPS_XOFF and ILK_XOFF
 * are asserted to the BMI_X2P_ARB.
 * @max_pkt_len: Maximum packet length, integral number of 256B
 * buffers.
 */
union bmi_ctl {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_56_63 : 8;
		u64 ilk_hdrq_thrsh : 8;
		u64 nps_hdrq_thrsh : 8;
		u64 totl_hdrq_thrsh : 8;
		u64 ilk_free_thrsh : 8;
		u64 nps_free_thrsh : 8;
		u64 totl_free_thrsh : 8;
		u64 max_pkt_len : 8;
#else
		u64 max_pkt_len : 8;
		u64 totl_free_thrsh : 8;
		u64 nps_free_thrsh : 8;
		u64 ilk_free_thrsh : 8;
		u64 totl_hdrq_thrsh : 8;
		u64 nps_hdrq_thrsh : 8;
		u64 ilk_hdrq_thrsh : 8;
		u64 raz_56_63 : 8;
#endif
	} s;
};

/**
 * struct bmi_int_ena_w1s - BMI interrupt enable set register
 * @ilk_req_oflw: Reads or sets enable for
 * BMI_INT[ILK_REQ_OFLW].
 * @nps_req_oflw: Reads or sets enable for
 * BMI_INT[NPS_REQ_OFLW].
 * @fpf_undrrn: Reads or sets enable for
 * BMI_INT[FPF_UNDRRN].
 * @eop_err_ilk: Reads or sets enable for
 * BMI_INT[EOP_ERR_ILK].
 * @eop_err_nps: Reads or sets enable for
 * BMI_INT[EOP_ERR_NPS].
 * @sop_err_ilk: Reads or sets enable for
 * BMI_INT[SOP_ERR_ILK].
 * @sop_err_nps: Reads or sets enable for
 * BMI_INT[SOP_ERR_NPS].
 * @pkt_rcv_err_ilk: Reads or sets enable for
 * BMI_INT[PKT_RCV_ERR_ILK].
 * @pkt_rcv_err_nps: Reads or sets enable for
 * BMI_INT[PKT_RCV_ERR_NPS].
 * @max_len_err_ilk: Reads or sets enable for
 * BMI_INT[MAX_LEN_ERR_ILK].
 * @max_len_err_nps: Reads or sets enable for
 * BMI_INT[MAX_LEN_ERR_NPS].
 */
union bmi_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_13_63 : 51;
		u64 ilk_req_oflw : 1;
		u64 nps_req_oflw : 1;
		u64 raz_10 : 1;
		u64 raz_9 : 1;
		u64 fpf_undrrn : 1;
		u64 eop_err_ilk : 1;
		u64 eop_err_nps : 1;
		u64 sop_err_ilk : 1;
		u64 sop_err_nps : 1;
		u64 pkt_rcv_err_ilk : 1;
		u64 pkt_rcv_err_nps : 1;
		u64 max_len_err_ilk : 1;
		u64 max_len_err_nps : 1;
#else
		u64 max_len_err_nps : 1;
		u64 max_len_err_ilk : 1;
		u64 pkt_rcv_err_nps : 1;
		u64 pkt_rcv_err_ilk : 1;
		u64 sop_err_nps : 1;
		u64 sop_err_ilk : 1;
		u64 eop_err_nps : 1;
		u64 eop_err_ilk : 1;
		u64 fpf_undrrn : 1;
		u64 raz_9 : 1;
		u64 raz_10 : 1;
		u64 nps_req_oflw : 1;
		u64 ilk_req_oflw : 1;
		u64 raz_13_63 : 51;
#endif
	} s;
};

/**
 * struct bmo_ctl2 - BMO Control2 Register
 * @arb_sel: Determines P2X Arbitration
 * @ilk_buf_thrsh: Maximum number of buffers that the
 * ILK packet flows may consume before ILK XOFF is
 * asserted to the POM.
 * @nps_slc_buf_thrsh: Maximum number of buffers that the
 * NPS_SLC packet flow may consume before NPS_SLC XOFF is
 * asserted to the POM.
 * @nps_uns_buf_thrsh: Maximum number of buffers that the
 * NPS_UNS packet flow may consume before NPS_UNS XOFF is
 * asserted to the POM.
 * @totl_buf_thrsh: Maximum number of buffers that ILK, NPS_UNS and
 * NPS_SLC packet flows may consume before NPS_UNS XOFF, NPS_SLC XOFF
 * and ILK XOFF are all asserted to the POM.
 */
union bmo_ctl2 {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 arb_sel : 1;
		u64 raz_32_62 : 31;
		u64 ilk_buf_thrsh : 8;
		u64 nps_slc_buf_thrsh : 8;
		u64 nps_uns_buf_thrsh : 8;
		u64 totl_buf_thrsh : 8;
#else
		u64 totl_buf_thrsh : 8;
		u64 nps_uns_buf_thrsh : 8;
		u64 nps_slc_buf_thrsh : 8;
		u64 ilk_buf_thrsh : 8;
		u64 raz_32_62 : 31;
		u64 arb_sel : 1;
#endif
	} s;
};

/**
 * struct pom_int_ena_w1s - POM interrupt enable set register
 * @illegal_intf: Reads or sets enable for POM_INT[ILLEGAL_INTF].
 * @illegal_dport: Reads or sets enable for POM_INT[ILLEGAL_DPORT].
 */
union pom_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 60;
		u64 illegal_intf : 1;
		u64 illegal_dport : 1;
		u64 raz1 : 1;
		u64 raz0 : 1;
#else
		u64 raz0 : 1;
		u64 raz1 : 1;
		u64 illegal_dport : 1;
		u64 illegal_intf : 1;
		u64 raz2 : 60;
#endif
	} s;
};

/**
 * struct lbc_inval_ctl - LBC invalidation control register
 * @wait_timer: Wait timer for wait state. [WAIT_TIMER] must
 * always be written with its reset value.
 * @cam_inval_start: Software should write [CAM_INVAL_START]=1
 * to initiate an LBC cache invalidation. After this, software
 * should read LBC_INVAL_STATUS until LBC_INVAL_STATUS[DONE] is set.
 * LBC hardware clears [CAM_INVAL_START] before software can
 * observe LBC_INVAL_STATUS[DONE] being set.
 */
union lbc_inval_ctl {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 48;
		u64 wait_timer : 8;
		u64 raz1 : 6;
		u64 cam_inval_start : 1;
		u64 raz0 : 1;
#else
		u64 raz0 : 1;
		u64 cam_inval_start : 1;
		u64 raz1 : 6;
		u64 wait_timer : 8;
		u64 raz2 : 48;
#endif
	} s;
};
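
/*
 * Illustrative sketch of the invalidation sequence described above (accessor
 * names assumed; union lbc_inval_status is defined further below): set
 * [CAM_INVAL_START], then poll LBC_INVAL_STATUS until [DONE] is set.
 *
 *	union lbc_inval_ctl ctl;
 *	union lbc_inval_status status;
 *
 *	ctl.value = nitrox_read_csr(ndev, LBC_INVAL_CTL);
 *	ctl.s.cam_inval_start = 1;
 *	nitrox_write_csr(ndev, LBC_INVAL_CTL, ctl.value);
 *	do {
 *		status.value = nitrox_read_csr(ndev, LBC_INVAL_STATUS);
 *	} while (!status.s.done);
 */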

/**
 * struct lbc_int_ena_w1s - LBC interrupt enable set register
 * @cam_hard_err: Reads or sets enable for LBC_INT[CAM_HARD_ERR].
 * @cam_inval_abort: Reads or sets enable for LBC_INT[CAM_INVAL_ABORT].
 * @over_fetch_err: Reads or sets enable for LBC_INT[OVER_FETCH_ERR].
 * @cache_line_to_err: Reads or sets enable for
 * LBC_INT[CACHE_LINE_TO_ERR].
 * @cam_soft_err: Reads or sets enable for
 * LBC_INT[CAM_SOFT_ERR].
 * @dma_rd_err: Reads or sets enable for
 * LBC_INT[DMA_RD_ERR].
 */
union lbc_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_10_63 : 54;
		u64 cam_hard_err : 1;
		u64 cam_inval_abort : 1;
		u64 over_fetch_err : 1;
		u64 cache_line_to_err : 1;
		u64 raz_2_5 : 4;
		u64 cam_soft_err : 1;
		u64 dma_rd_err : 1;
#else
		u64 dma_rd_err : 1;
		u64 cam_soft_err : 1;
		u64 raz_2_5 : 4;
		u64 cache_line_to_err : 1;
		u64 over_fetch_err : 1;
		u64 cam_inval_abort : 1;
		u64 cam_hard_err : 1;
		u64 raz_10_63 : 54;
#endif
	} s;
};

/**
 * struct lbc_int - LBC interrupt summary register
 * @cam_hard_err: indicates a fatal hardware error.
 * It requires a system reset.
 * When [CAM_HARD_ERR] is set, LBC stops logging any new information in
 * LBC_POM_MISS_INFO_LOG,
 * LBC_POM_MISS_ADDR_LOG,
 * LBC_EFL_MISS_INFO_LOG, and
 * LBC_EFL_MISS_ADDR_LOG.
 * Software should sample them.
 * @cam_inval_abort: indicates a fatal hardware error.
 * A system reset is required.
 * @over_fetch_err: indicates a fatal hardware error.
 * A system reset is required.
 * @cache_line_to_err: is a debug feature.
 * This timeout interrupt bit tells the software that
 * a cacheline in LBC has non-zero usage and the context
 * has not been used for greater than the
 * LBC_TO_CNT[TO_CNT] time interval.
 * @sbe: Memory SBE error. This is recoverable via ECC.
 * See LBC_ECC_INT for more details.
 * @dbe: Memory DBE error. This is fatal and requires a
 * system reset.
 * @pref_dat_len_mismatch_err: Summary bit for context length
 * mismatch errors.
 * @rd_dat_len_mismatch_err: Summary bit for SE read data length
 * greater than data prefetch length errors.
 * @cam_soft_err: is recoverable. Software must complete a
 * LBC_INVAL_CTL[CAM_INVAL_START] invalidation sequence and
 * then clear [CAM_SOFT_ERR].
 * @dma_rd_err: A context prefetch read of host memory returned with
 * a read error.
 */
union lbc_int {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_10_63 : 54;
		u64 cam_hard_err : 1;
		u64 cam_inval_abort : 1;
		u64 over_fetch_err : 1;
		u64 cache_line_to_err : 1;
		u64 sbe : 1;
		u64 dbe : 1;
		u64 pref_dat_len_mismatch_err : 1;
		u64 rd_dat_len_mismatch_err : 1;
		u64 cam_soft_err : 1;
		u64 dma_rd_err : 1;
#else
		u64 dma_rd_err : 1;
		u64 cam_soft_err : 1;
		u64 rd_dat_len_mismatch_err : 1;
		u64 pref_dat_len_mismatch_err : 1;
		u64 dbe : 1;
		u64 sbe : 1;
		u64 cache_line_to_err : 1;
		u64 over_fetch_err : 1;
		u64 cam_inval_abort : 1;
		u64 cam_hard_err : 1;
		u64 raz_10_63 : 54;
#endif
	} s;
};

/**
 * struct lbc_inval_status - LBC Invalidation status register
 * @cam_clean_entry_complete_cnt: The number of entries that are
 * cleaned up successfully.
 * @cam_clean_entry_cnt: The number of entries that have the CAM
 * inval command issued.
 * @cam_inval_state: cam invalidation FSM state
 * @cam_inval_abort: cam invalidation abort
 * @cam_rst_rdy: lbc_cam reset ready
 * @done: LBC clears [DONE] when
 * LBC_INVAL_CTL[CAM_INVAL_START] is written with a one,
 * and sets [DONE] when it completes the invalidation
 * sequence.
 */
union lbc_inval_status {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz3 : 23;
		u64 cam_clean_entry_complete_cnt : 9;
		u64 raz2 : 7;
		u64 cam_clean_entry_cnt : 9;
		u64 raz1 : 5;
		u64 cam_inval_state : 3;
		u64 raz0 : 5;
		u64 cam_inval_abort : 1;
		u64 cam_rst_rdy : 1;
		u64 done : 1;
#else
		u64 done : 1;
		u64 cam_rst_rdy : 1;
		u64 cam_inval_abort : 1;
		u64 raz0 : 5;
		u64 cam_inval_state : 3;
		u64 raz1 : 5;
		u64 cam_clean_entry_cnt : 9;
		u64 raz2 : 7;
		u64 cam_clean_entry_complete_cnt : 9;
		u64 raz3 : 23;
#endif
	} s;
};

/**
 * struct rst_boot - RST Boot Register
 * @jtcsrdis: when set, internal CSR access via JTAG TAP controller
 * is disabled
 * @jt_tst_mode: JTAG test mode
 * @io_supply: I/O power supply setting based on IO_VDD_SELECT pin:
 * 0x1 = 1.8V
 * 0x2 = 2.5V
 * 0x4 = 3.3V
 * All other values are reserved
 * @pnr_mul: clock multiplier
 * @lboot: last boot cause mask, resets only with PLL_DC_OK
 * @rboot: determines whether core 0 remains in reset after
 * chip cold or warm or soft reset
 * @rboot_pin: read only access to REMOTE_BOOT pin
 */
union rst_boot {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_63 : 1;
		u64 jtcsrdis : 1;
		u64 raz_59_61 : 3;
		u64 jt_tst_mode : 1;
		u64 raz_40_57 : 18;
		u64 io_supply : 3;
		u64 raz_30_36 : 7;
		u64 pnr_mul : 6;
		u64 raz_12_23 : 12;
		u64 lboot : 10;
		u64 rboot : 1;
		u64 rboot_pin : 1;
#else
		u64 rboot_pin : 1;
		u64 rboot : 1;
		u64 lboot : 10;
		u64 raz_12_23 : 12;
		u64 pnr_mul : 6;
		u64 raz_30_36 : 7;
		u64 io_supply : 3;
		u64 raz_40_57 : 18;
		u64 jt_tst_mode : 1;
		u64 raz_59_61 : 3;
		u64 jtcsrdis : 1;
		u64 raz_63 : 1;
#endif
	};
};
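
/*
 * Illustrative sketch (accessor name assumed): RST_BOOT is normally only
 * read, e.g. to report the last boot cause mask or the programmed clock
 * multiplier during device bring-up.
 *
 *	union rst_boot rst;
 *
 *	rst.value = nitrox_read_csr(ndev, RST_BOOT);
 *	pr_debug("lboot %#llx pnr_mul %llu\n",
 *		 (u64)rst.lboot, (u64)rst.pnr_mul);
 */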

/**
 * struct fus_dat1 - Fuse Data 1 Register
 * @pll_mul: main clock PLL multiplier hardware limit
 * @pll_half_dis: main clock PLL control
 * @efus_lck: efuse lockdown
 * @zip_info: ZIP information
 * @bar2_sz_conf: when zero, BAR2 size conforms to
 * PCIe specification
 * @efus_ign: efuse ignore
 * @nozip: ZIP disable
 * @pll_alt_matrix: select alternate PLL matrix
 * @pll_bwadj_denom: select CLKF denominator for
 * BWADJ value
 * @chip_id: chip ID
 */
union fus_dat1 {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_57_63 : 7;
		u64 pll_mul : 3;
		u64 pll_half_dis : 1;
		u64 raz_43_52 : 10;
		u64 efus_lck : 3;
		u64 raz_26_39 : 14;
		u64 zip_info : 5;
		u64 bar2_sz_conf : 1;
		u64 efus_ign : 1;
		u64 nozip : 1;
		u64 raz_11_17 : 7;
		u64 pll_alt_matrix : 1;
		u64 pll_bwadj_denom : 2;
		u64 chip_id : 8;
#else
		u64 chip_id : 8;
		u64 pll_bwadj_denom : 2;
		u64 pll_alt_matrix : 1;
		u64 raz_11_17 : 7;
		u64 nozip : 1;
		u64 efus_ign : 1;
		u64 bar2_sz_conf : 1;
		u64 zip_info : 5;
		u64 raz_26_39 : 14;
		u64 efus_lck : 3;
		u64 raz_43_52 : 10;
		u64 pll_half_dis : 1;
		u64 pll_mul : 3;
		u64 raz_57_63 : 7;
#endif
	};
};

#endif /* __NITROX_CSR_H */