cvmx-pow.h 64 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215
  1. /***********************license start***************
  2. * Author: Cavium Networks
  3. *
  4. * Contact: [email protected]
  5. * This file is part of the OCTEON SDK
  6. *
  7. * Copyright (c) 2003-2008 Cavium Networks
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more
  17. * details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this file; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  22. * or visit http://www.gnu.org/licenses/.
  23. *
  24. * This file may also be available under a different license from Cavium.
  25. * Contact Cavium Networks for more information
  26. ***********************license end**************************************/
  27. /**
  28. * Interface to the hardware Packet Order / Work unit.
  29. *
  30. * New, starting with SDK 1.7.0, cvmx-pow supports a number of
  31. * extended consistency checks. The define
  32. * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
  33. * internal state checks to find common programming errors. If
  34. * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
  35. * enabled. For example, cvmx-pow will check for the following
  36. * program errors or POW state inconsistency.
  37. * - Requesting a POW operation with an active tag switch in
  38. * progress.
  39. * - Waiting for a tag switch to complete for an excessively
  40. * long period. This is normally a sign of an error in locking
  41. * causing deadlock.
  42. * - Illegal tag switches from NULL_NULL.
  43. * - Illegal tag switches from NULL.
  44. * - Illegal deschedule request.
  45. * - WQE pointer not matching the one attached to the core by
  46. * the POW.
  47. *
  48. */
  49. #ifndef __CVMX_POW_H__
  50. #define __CVMX_POW_H__
  51. #include <asm/octeon/cvmx-pow-defs.h>
  52. #include <asm/octeon/cvmx-scratch.h>
  53. #include <asm/octeon/cvmx-wqe.h>
/* Default to having all POW consistency checks turned on */
  55. #ifndef CVMX_ENABLE_POW_CHECKS
  56. #define CVMX_ENABLE_POW_CHECKS 1
  57. #endif
  58. enum cvmx_pow_tag_type {
  59. /* Tag ordering is maintained */
  60. CVMX_POW_TAG_TYPE_ORDERED = 0L,
  61. /* Tag ordering is maintained, and at most one PP has the tag */
  62. CVMX_POW_TAG_TYPE_ATOMIC = 1L,
  63. /*
  64. * The work queue entry from the order - NEVER tag switch from
  65. * NULL to NULL
  66. */
  67. CVMX_POW_TAG_TYPE_NULL = 2L,
  68. /* A tag switch to NULL, and there is no space reserved in POW
  69. * - NEVER tag switch to NULL_NULL
  70. * - NEVER tag switch from NULL_NULL
  71. * - NULL_NULL is entered at the beginning of time and on a deschedule.
  72. * - NULL_NULL can be exited by a new work request. A NULL_SWITCH
  73. * load can also switch the state to NULL
  74. */
  75. CVMX_POW_TAG_TYPE_NULL_NULL = 3L
  76. };
  77. /**
  78. * Wait flag values for pow functions.
  79. */
  80. typedef enum {
  81. CVMX_POW_WAIT = 1,
  82. CVMX_POW_NO_WAIT = 0,
  83. } cvmx_pow_wait_t;
  84. /**
  85. * POW tag operations. These are used in the data stored to the POW.
  86. */
  87. typedef enum {
  88. /*
  89. * switch the tag (only) for this PP
  90. * - the previous tag should be non-NULL in this case
  91. * - tag switch response required
  92. * - fields used: op, type, tag
  93. */
  94. CVMX_POW_TAG_OP_SWTAG = 0L,
  95. /*
  96. * switch the tag for this PP, with full information
  97. * - this should be used when the previous tag is NULL
  98. * - tag switch response required
  99. * - fields used: address, op, grp, type, tag
  100. */
  101. CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
  102. /*
  103. * switch the tag (and/or group) for this PP and de-schedule
  104. * - OK to keep the tag the same and only change the group
  105. * - fields used: op, no_sched, grp, type, tag
  106. */
  107. CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
  108. /*
  109. * just de-schedule
  110. * - fields used: op, no_sched
  111. */
  112. CVMX_POW_TAG_OP_DESCH = 3L,
  113. /*
  114. * create an entirely new work queue entry
  115. * - fields used: address, op, qos, grp, type, tag
  116. */
  117. CVMX_POW_TAG_OP_ADDWQ = 4L,
  118. /*
  119. * just update the work queue pointer and grp for this PP
  120. * - fields used: address, op, grp
  121. */
  122. CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
  123. /*
  124. * set the no_sched bit on the de-schedule list
  125. *
  126. * - does nothing if the selected entry is not on the
  127. * de-schedule list
  128. *
  129. * - does nothing if the stored work queue pointer does not
  130. * match the address field
  131. *
  132. * - fields used: address, index, op
  133. *
  134. * Before issuing a *_NSCHED operation, SW must guarantee
  135. * that all prior deschedules and set/clr NSCHED operations
  136. * are complete and all prior switches are complete. The
  137. * hardware provides the opsdone bit and swdone bit for SW
  138. * polling. After issuing a *_NSCHED operation, SW must
  139. * guarantee that the set/clr NSCHED is complete before any
  140. * subsequent operations.
  141. */
  142. CVMX_POW_TAG_OP_SET_NSCHED = 6L,
  143. /*
  144. * clears the no_sched bit on the de-schedule list
  145. *
  146. * - does nothing if the selected entry is not on the
  147. * de-schedule list
  148. *
  149. * - does nothing if the stored work queue pointer does not
  150. * match the address field
  151. *
  152. * - fields used: address, index, op
  153. *
  154. * Before issuing a *_NSCHED operation, SW must guarantee that
  155. * all prior deschedules and set/clr NSCHED operations are
  156. * complete and all prior switches are complete. The hardware
  157. * provides the opsdone bit and swdone bit for SW
  158. * polling. After issuing a *_NSCHED operation, SW must
  159. * guarantee that the set/clr NSCHED is complete before any
  160. * subsequent operations.
  161. */
  162. CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
  163. /* do nothing */
  164. CVMX_POW_TAG_OP_NOP = 15L
  165. } cvmx_pow_tag_op_t;
/**
 * This structure defines the store data on a store to POW.
 *
 * NOTE(review): the bit-field order mirrors the hardware command
 * format; field order and widths must not be changed.
 */
typedef union {
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * Don't reschedule this entry. no_sched is used for
		 * CVMX_POW_TAG_OP_SWTAG_DESCH and
		 * CVMX_POW_TAG_OP_DESCH
		 */
		uint64_t no_sched:1;
		uint64_t unused:2;
		/* Contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
		uint64_t index:13;
		/* The operation to perform */
		cvmx_pow_tag_op_t op:4;
		uint64_t unused2:2;
		/*
		 * The QOS level for the packet. qos is only used for
		 * CVMX_POW_TAG_OP_ADDWQ
		 */
		uint64_t qos:3;
		/*
		 * The group that the work queue entry will be
		 * scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ,
		 * CVMX_POW_TAG_OP_SWTAG_FULL,
		 * CVMX_POW_TAG_OP_SWTAG_DESCH, and
		 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP
		 */
		uint64_t grp:4;
		/*
		 * The type of the tag. type is used for everything
		 * except CVMX_POW_TAG_OP_DESCH,
		 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
		 * CVMX_POW_TAG_OP_*_NSCHED
		 */
		uint64_t type:3;
		/*
		 * The actual tag. tag is used for everything except
		 * CVMX_POW_TAG_OP_DESCH,
		 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
		 * CVMX_POW_TAG_OP_*_NSCHED
		 */
		uint64_t tag:32;
#else
		/* Little-endian mirror of the fields above. */
		uint64_t tag:32;
		uint64_t type:3;
		uint64_t grp:4;
		uint64_t qos:3;
		uint64_t unused2:2;
		cvmx_pow_tag_op_t op:4;
		uint64_t index:13;
		uint64_t unused:2;
		uint64_t no_sched:1;
#endif
	} s;
} cvmx_pow_tag_req_t;
/**
 * This structure describes the address to load stuff from POW.
 *
 * Each member view encodes a different POW load "did" (device ID)
 * sub-operation; the selected view determines which response format
 * the hardware returns.
 */
typedef union {
	uint64_t u64;
	/**
	 * Address for new work request loads (did<2:0> == 0)
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 0 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_4_39:36;
		/*
		 * If set, don't return load response until work is
		 * available.
		 */
		uint64_t wait:1;
		/* Must be zero */
		uint64_t reserved_0_2:3;
#else
		uint64_t reserved_0_2:3;
		uint64_t wait:1;
		uint64_t reserved_4_39:36;
		uint64_t did:8;
		uint64_t is_io:1;
		uint64_t reserved_49_61:13;
		uint64_t mem_region:2;
#endif
	} swork;
	/**
	 * Address for loads to get POW internal status
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 1 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_10_39:30;
		/* The core id to get status for */
		uint64_t coreid:4;
		/*
		 * If set and get_cur is set, return reverse tag-list
		 * pointer rather than forward tag-list pointer.
		 */
		uint64_t get_rev:1;
		/*
		 * If set, return current status rather than pending
		 * status.
		 */
		uint64_t get_cur:1;
		/*
		 * If set, get the work-queue pointer rather than
		 * tag/type.
		 */
		uint64_t get_wqp:1;
		/* Must be zero */
		uint64_t reserved_0_2:3;
#else
		uint64_t reserved_0_2:3;
		uint64_t get_wqp:1;
		uint64_t get_cur:1;
		uint64_t get_rev:1;
		uint64_t coreid:4;
		uint64_t reserved_10_39:30;
		uint64_t did:8;
		uint64_t is_io:1;
		uint64_t reserved_49_61:13;
		uint64_t mem_region:2;
#endif
	} sstatus;
	/**
	 * Address for memory loads to get POW internal state
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 2 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_16_39:24;
		/* POW memory index */
		uint64_t index:11;
		/*
		 * If set, return deschedule information rather than
		 * the standard response for work-queue index (invalid
		 * if the work-queue entry is not on the deschedule
		 * list).
		 */
		uint64_t get_des:1;
		/*
		 * If set, get the work-queue pointer rather than
		 * tag/type (no effect when get_des set).
		 */
		uint64_t get_wqp:1;
		/* Must be zero */
		uint64_t reserved_0_2:3;
#else
		uint64_t reserved_0_2:3;
		uint64_t get_wqp:1;
		uint64_t get_des:1;
		uint64_t index:11;
		uint64_t reserved_16_39:24;
		uint64_t did:8;
		uint64_t is_io:1;
		uint64_t reserved_49_61:13;
		uint64_t mem_region:2;
#endif
	} smemload;
	/**
	 * Address for index/pointer loads
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 3 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_9_39:31;
		/*
		 * when {get_rmt ==0 AND get_des_get_tail == 0}, this
		 * field selects one of eight POW internal-input
		 * queues (0-7), one per QOS level; values 8-15 are
		 * illegal in this case; when {get_rmt ==0 AND
		 * get_des_get_tail == 1}, this field selects one of
		 * 16 deschedule lists (per group); when get_rmt ==1,
		 * this field selects one of 16 memory-input queue
		 * lists. The two memory-input queue lists associated
		 * with each QOS level are:
		 *
		 * - qosgrp = 0, qosgrp = 8:  QOS0
		 * - qosgrp = 1, qosgrp = 9:  QOS1
		 * - qosgrp = 2, qosgrp = 10: QOS2
		 * - qosgrp = 3, qosgrp = 11: QOS3
		 * - qosgrp = 4, qosgrp = 12: QOS4
		 * - qosgrp = 5, qosgrp = 13: QOS5
		 * - qosgrp = 6, qosgrp = 14: QOS6
		 * - qosgrp = 7, qosgrp = 15: QOS7
		 */
		uint64_t qosgrp:4;
		/*
		 * If set and get_rmt is clear, return deschedule list
		 * indexes rather than indexes for the specified qos
		 * level; if set and get_rmt is set, return the tail
		 * pointer rather than the head pointer for the
		 * specified qos level.
		 */
		uint64_t get_des_get_tail:1;
		/*
		 * If set, return remote pointers rather than the
		 * local indexes for the specified qos level.
		 */
		uint64_t get_rmt:1;
		/* Must be zero */
		uint64_t reserved_0_2:3;
#else
		uint64_t reserved_0_2:3;
		uint64_t get_rmt:1;
		uint64_t get_des_get_tail:1;
		uint64_t qosgrp:4;
		uint64_t reserved_9_39:31;
		uint64_t did:8;
		uint64_t is_io:1;
		uint64_t reserved_49_61:13;
		uint64_t mem_region:2;
#endif
	} sindexload;
	/**
	 * address for NULL_RD request (did<2:0> == 4) when this is read,
	 * HW attempts to change the state to NULL if it is NULL_NULL (the
	 * hardware cannot switch from NULL_NULL to NULL if a POW entry is
	 * not available - software may need to recover by finishing
	 * another piece of work before a POW entry can ever become
	 * available.)
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 4 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_0_39:40;
#else
		uint64_t reserved_0_39:40;
		uint64_t did:8;
		uint64_t is_io:1;
		uint64_t reserved_49_61:13;
		uint64_t mem_region:2;
#endif
	} snull_rd;
} cvmx_pow_load_addr_t;
  444. /**
  445. * This structure defines the response to a load/SENDSINGLE to POW
  446. * (except CSR reads)
  447. */
  448. typedef union {
  449. uint64_t u64;
	/**
	 * Response to new work request loads
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * Set when no new work queue entry was returned.
		 * If there was de-scheduled work, the HW will
		 * definitely return it. When this bit is set, it
		 * could mean either:
		 *
		 * - There was no work, or
		 *
		 * - There was no work that the HW could find. This
		 *   case can happen, regardless of the wait bit value
		 *   in the original request, when there is work in
		 *   the IQ's that is too deep down the list.
		 */
		uint64_t no_work:1;
		/* Must be zero */
		uint64_t reserved_40_62:23;
		/* 36 in O1 -- the work queue pointer */
		uint64_t addr:40;
#else
		uint64_t addr:40;
		uint64_t reserved_40_62:23;
		uint64_t no_work:1;
#endif
	} s_work;
	/**
	 * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
	 *
	 * Reports the pending (not yet completed) POW operations for
	 * the selected core, with tag/type information.
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_62_63:2;
		/*
		 * Set when there is a pending non-NULL SWTAG or
		 * SWTAG_FULL, and the POW entry has not left the list
		 * for the original tag.
		 */
		uint64_t pend_switch:1;
		/* Set when SWTAG_FULL and pend_switch is set. */
		uint64_t pend_switch_full:1;
		/*
		 * Set when there is a pending NULL SWTAG, or an
		 * implicit switch to NULL.
		 */
		uint64_t pend_switch_null:1;
		/* Set when there is a pending DESCHED or SWTAG_DESCHED. */
		uint64_t pend_desched:1;
		/*
		 * Set when there is a pending SWTAG_DESCHED and
		 * pend_desched is set.
		 */
		uint64_t pend_desched_switch:1;
		/* Set when nosched is desired and pend_desched is set. */
		uint64_t pend_nosched:1;
		/* Set when there is a pending GET_WORK. */
		uint64_t pend_new_work:1;
		/*
		 * When pend_new_work is set, this bit indicates that
		 * the wait bit was set.
		 */
		uint64_t pend_new_work_wait:1;
		/* Set when there is a pending NULL_RD. */
		uint64_t pend_null_rd:1;
		/* Set when there is a pending CLR_NSCHED. */
		uint64_t pend_nosched_clr:1;
		uint64_t reserved_51:1;
		/* This is the index when pend_nosched_clr is set. */
		uint64_t pend_index:11;
		/*
		 * This is the new_grp when (pend_desched AND
		 * pend_desched_switch) is set.
		 */
		uint64_t pend_grp:4;
		uint64_t reserved_34_35:2;
		/*
		 * This is the tag type when pend_switch or
		 * (pend_desched AND pend_desched_switch) are set.
		 */
		uint64_t pend_type:2;
		/*
		 * This is the tag when pend_switch or (pend_desched
		 * AND pend_desched_switch) are set.
		 */
		uint64_t pend_tag:32;
#else
		uint64_t pend_tag:32;
		uint64_t pend_type:2;
		uint64_t reserved_34_35:2;
		uint64_t pend_grp:4;
		uint64_t pend_index:11;
		uint64_t reserved_51:1;
		uint64_t pend_nosched_clr:1;
		uint64_t pend_null_rd:1;
		uint64_t pend_new_work_wait:1;
		uint64_t pend_new_work:1;
		uint64_t pend_nosched:1;
		uint64_t pend_desched_switch:1;
		uint64_t pend_desched:1;
		uint64_t pend_switch_null:1;
		uint64_t pend_switch_full:1;
		uint64_t pend_switch:1;
		uint64_t reserved_62_63:2;
#endif
	} s_sstatus0;
	/**
	 * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
	 *
	 * Same pending-operation flags as s_sstatus0, but returns the
	 * work-queue pointer instead of tag/type.
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_62_63:2;
		/*
		 * Set when there is a pending non-NULL SWTAG or
		 * SWTAG_FULL, and the POW entry has not left the list
		 * for the original tag.
		 */
		uint64_t pend_switch:1;
		/* Set when SWTAG_FULL and pend_switch is set. */
		uint64_t pend_switch_full:1;
		/*
		 * Set when there is a pending NULL SWTAG, or an
		 * implicit switch to NULL.
		 */
		uint64_t pend_switch_null:1;
		/*
		 * Set when there is a pending DESCHED or
		 * SWTAG_DESCHED.
		 */
		uint64_t pend_desched:1;
		/*
		 * Set when there is a pending SWTAG_DESCHED and
		 * pend_desched is set.
		 */
		uint64_t pend_desched_switch:1;
		/* Set when nosched is desired and pend_desched is set. */
		uint64_t pend_nosched:1;
		/* Set when there is a pending GET_WORK. */
		uint64_t pend_new_work:1;
		/*
		 * When pend_new_work is set, this bit indicates that
		 * the wait bit was set.
		 */
		uint64_t pend_new_work_wait:1;
		/* Set when there is a pending NULL_RD. */
		uint64_t pend_null_rd:1;
		/* Set when there is a pending CLR_NSCHED. */
		uint64_t pend_nosched_clr:1;
		uint64_t reserved_51:1;
		/* This is the index when pend_nosched_clr is set. */
		uint64_t pend_index:11;
		/*
		 * This is the new_grp when (pend_desched AND
		 * pend_desched_switch) is set.
		 */
		uint64_t pend_grp:4;
		/* This is the wqp when pend_nosched_clr is set. */
		uint64_t pend_wqp:36;
#else
		uint64_t pend_wqp:36;
		uint64_t pend_grp:4;
		uint64_t pend_index:11;
		uint64_t reserved_51:1;
		uint64_t pend_nosched_clr:1;
		uint64_t pend_null_rd:1;
		uint64_t pend_new_work_wait:1;
		uint64_t pend_new_work:1;
		uint64_t pend_nosched:1;
		uint64_t pend_desched_switch:1;
		uint64_t pend_desched:1;
		uint64_t pend_switch_null:1;
		uint64_t pend_switch_full:1;
		uint64_t pend_switch:1;
		uint64_t reserved_62_63:2;
#endif
	} s_sstatus1;
	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and
	 * get_rev==0)
	 *
	 * Reports the core's current tag state with the forward
	 * tag-list link.
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_62_63:2;
		/*
		 * Points to the next POW entry in the tag list when
		 * tail == 0 (and tag_type is not NULL or NULL_NULL).
		 */
		uint64_t link_index:11;
		/* The POW entry attached to the core. */
		uint64_t index:11;
		/*
		 * The group attached to the core (updated when new
		 * tag list entered on SWTAG_FULL).
		 */
		uint64_t grp:4;
		/*
		 * Set when this POW entry is at the head of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t head:1;
		/*
		 * Set when this POW entry is at the tail of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t tail:1;
		/*
		 * The tag type attached to the core (updated when new
		 * tag list entered on SWTAG, SWTAG_FULL, or
		 * SWTAG_DESCHED).
		 */
		uint64_t tag_type:2;
		/*
		 * The tag attached to the core (updated when new tag
		 * list entered on SWTAG, SWTAG_FULL, or
		 * SWTAG_DESCHED).
		 */
		uint64_t tag:32;
#else
		uint64_t tag:32;
		uint64_t tag_type:2;
		uint64_t tail:1;
		uint64_t head:1;
		uint64_t grp:4;
		uint64_t index:11;
		uint64_t link_index:11;
		uint64_t reserved_62_63:2;
#endif
	} s_sstatus2;
	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and
	 * get_rev==1)
	 *
	 * Same as s_sstatus2 but with the reverse tag-list link.
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_62_63:2;
		/*
		 * Points to the prior POW entry in the tag list when
		 * head == 0 (and tag_type is not NULL or
		 * NULL_NULL). This field is unpredictable when the
		 * core's state is NULL or NULL_NULL.
		 */
		uint64_t revlink_index:11;
		/* The POW entry attached to the core. */
		uint64_t index:11;
		/*
		 * The group attached to the core (updated when new
		 * tag list entered on SWTAG_FULL).
		 */
		uint64_t grp:4;
		/*
		 * Set when this POW entry is at the head of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t head:1;
		/*
		 * Set when this POW entry is at the tail of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t tail:1;
		/*
		 * The tag type attached to the core (updated when new
		 * tag list entered on SWTAG, SWTAG_FULL, or
		 * SWTAG_DESCHED).
		 */
		uint64_t tag_type:2;
		/*
		 * The tag attached to the core (updated when new tag
		 * list entered on SWTAG, SWTAG_FULL, or
		 * SWTAG_DESCHED).
		 */
		uint64_t tag:32;
#else
		uint64_t tag:32;
		uint64_t tag_type:2;
		uint64_t tail:1;
		uint64_t head:1;
		uint64_t grp:4;
		uint64_t index:11;
		uint64_t revlink_index:11;
		uint64_t reserved_62_63:2;
#endif
	} s_sstatus3;
	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
	 * get_rev==0)
	 *
	 * Current state with the work-queue pointer and forward
	 * tag-list link.
	 */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_62_63:2;
		/*
		 * Points to the next POW entry in the tag list when
		 * tail == 0 (and tag_type is not NULL or NULL_NULL).
		 */
		uint64_t link_index:11;
		/* The POW entry attached to the core. */
		uint64_t index:11;
		/*
		 * The group attached to the core (updated when new
		 * tag list entered on SWTAG_FULL).
		 */
		uint64_t grp:4;
		/*
		 * The wqp attached to the core (updated when new tag
		 * list entered on SWTAG_FULL).
		 */
		uint64_t wqp:36;
#else
		uint64_t wqp:36;
		uint64_t grp:4;
		uint64_t index:11;
		uint64_t link_index:11;
		uint64_t reserved_62_63:2;
#endif
	} s_sstatus4;
  765. /**
  766. * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
  767. * get_rev==1)
  768. */
  769. struct {
  770. #ifdef __BIG_ENDIAN_BITFIELD
  771. uint64_t reserved_62_63:2;
  772. /*
  773. * Points to the prior POW entry in the tag list when
  774. * head == 0 (and tag_type is not NULL or
  775. * NULL_NULL). This field is unpredictable when the
  776. * core's state is NULL or NULL_NULL.
  777. */
  778. uint64_t revlink_index:11;
  779. /* The POW entry attached to the core. */
  780. uint64_t index:11;
  781. /*
  782. * The group attached to the core (updated when new
  783. * tag list entered on SWTAG_FULL).
  784. */
  785. uint64_t grp:4;
  786. /*
  787. * The wqp attached to the core (updated when new tag
  788. * list entered on SWTAG_FULL).
  789. */
  790. uint64_t wqp:36;
  791. #else
  792. uint64_t wqp:36;
  793. uint64_t grp:4;
  794. uint64_t index:11;
  795. uint64_t revlink_index:11;
  796. uint64_t reserved_62_63:2;
  797. #endif
  798. } s_sstatus5;
  799. /**
  800. * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
  801. */
  802. struct {
  803. #ifdef __BIG_ENDIAN_BITFIELD
  804. uint64_t reserved_51_63:13;
  805. /*
  806. * The next entry in the input, free, descheduled_head
  807. * list (unpredictable if entry is the tail of the
  808. * list).
  809. */
  810. uint64_t next_index:11;
  811. /* The group of the POW entry. */
  812. uint64_t grp:4;
  813. uint64_t reserved_35:1;
  814. /*
  815. * Set when this POW entry is at the tail of its tag
  816. * list (also set when in the NULL or NULL_NULL
  817. * state).
  818. */
  819. uint64_t tail:1;
  820. /* The tag type of the POW entry. */
  821. uint64_t tag_type:2;
  822. /* The tag of the POW entry. */
  823. uint64_t tag:32;
  824. #else
  825. uint64_t tag:32;
  826. uint64_t tag_type:2;
  827. uint64_t tail:1;
  828. uint64_t reserved_35:1;
  829. uint64_t grp:4;
  830. uint64_t next_index:11;
  831. uint64_t reserved_51_63:13;
  832. #endif
  833. } s_smemload0;
  834. /**
  835. * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
  836. */
  837. struct {
  838. #ifdef __BIG_ENDIAN_BITFIELD
  839. uint64_t reserved_51_63:13;
  840. /*
  841. * The next entry in the input, free, descheduled_head
  842. * list (unpredictable if entry is the tail of the
  843. * list).
  844. */
  845. uint64_t next_index:11;
  846. /* The group of the POW entry. */
  847. uint64_t grp:4;
  848. /* The WQP held in the POW entry. */
  849. uint64_t wqp:36;
  850. #else
  851. uint64_t wqp:36;
  852. uint64_t grp:4;
  853. uint64_t next_index:11;
  854. uint64_t reserved_51_63:13;
  855. #endif
  856. } s_smemload1;
  857. /**
  858. * Result For POW Memory Load (get_des == 1)
  859. */
  860. struct {
  861. #ifdef __BIG_ENDIAN_BITFIELD
  862. uint64_t reserved_51_63:13;
  863. /*
  864. * The next entry in the tag list connected to the
  865. * descheduled head.
  866. */
  867. uint64_t fwd_index:11;
  868. /* The group of the POW entry. */
  869. uint64_t grp:4;
  870. /* The nosched bit for the POW entry. */
  871. uint64_t nosched:1;
  872. /* There is a pending tag switch */
  873. uint64_t pend_switch:1;
  874. /*
  875. * The next tag type for the new tag list when
  876. * pend_switch is set.
  877. */
  878. uint64_t pend_type:2;
  879. /*
  880. * The next tag for the new tag list when pend_switch
  881. * is set.
  882. */
  883. uint64_t pend_tag:32;
  884. #else
  885. uint64_t pend_tag:32;
  886. uint64_t pend_type:2;
  887. uint64_t pend_switch:1;
  888. uint64_t nosched:1;
  889. uint64_t grp:4;
  890. uint64_t fwd_index:11;
  891. uint64_t reserved_51_63:13;
  892. #endif
  893. } s_smemload2;
  894. /**
  895. * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
  896. */
  897. struct {
  898. #ifdef __BIG_ENDIAN_BITFIELD
  899. uint64_t reserved_52_63:12;
  900. /*
  901. * set when there is one or more POW entries on the
  902. * free list.
  903. */
  904. uint64_t free_val:1;
  905. /*
  906. * set when there is exactly one POW entry on the free
  907. * list.
  908. */
  909. uint64_t free_one:1;
  910. uint64_t reserved_49:1;
  911. /*
  912. * when free_val is set, indicates the first entry on
  913. * the free list.
  914. */
  915. uint64_t free_head:11;
  916. uint64_t reserved_37:1;
  917. /*
  918. * when free_val is set, indicates the last entry on
  919. * the free list.
  920. */
  921. uint64_t free_tail:11;
  922. /*
  923. * set when there is one or more POW entries on the
  924. * input Q list selected by qosgrp.
  925. */
  926. uint64_t loc_val:1;
  927. /*
  928. * set when there is exactly one POW entry on the
  929. * input Q list selected by qosgrp.
  930. */
  931. uint64_t loc_one:1;
  932. uint64_t reserved_23:1;
  933. /*
  934. * when loc_val is set, indicates the first entry on
  935. * the input Q list selected by qosgrp.
  936. */
  937. uint64_t loc_head:11;
  938. uint64_t reserved_11:1;
  939. /*
  940. * when loc_val is set, indicates the last entry on
  941. * the input Q list selected by qosgrp.
  942. */
  943. uint64_t loc_tail:11;
  944. #else
  945. uint64_t loc_tail:11;
  946. uint64_t reserved_11:1;
  947. uint64_t loc_head:11;
  948. uint64_t reserved_23:1;
  949. uint64_t loc_one:1;
  950. uint64_t loc_val:1;
  951. uint64_t free_tail:11;
  952. uint64_t reserved_37:1;
  953. uint64_t free_head:11;
  954. uint64_t reserved_49:1;
  955. uint64_t free_one:1;
  956. uint64_t free_val:1;
  957. uint64_t reserved_52_63:12;
  958. #endif
  959. } sindexload0;
  960. /**
  961. * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
  962. */
  963. struct {
  964. #ifdef __BIG_ENDIAN_BITFIELD
  965. uint64_t reserved_52_63:12;
  966. /*
  967. * set when there is one or more POW entries on the
  968. * nosched list.
  969. */
  970. uint64_t nosched_val:1;
  971. /*
  972. * set when there is exactly one POW entry on the
  973. * nosched list.
  974. */
  975. uint64_t nosched_one:1;
  976. uint64_t reserved_49:1;
  977. /*
  978. * when nosched_val is set, indicates the first entry
  979. * on the nosched list.
  980. */
  981. uint64_t nosched_head:11;
  982. uint64_t reserved_37:1;
  983. /*
  984. * when nosched_val is set, indicates the last entry
  985. * on the nosched list.
  986. */
  987. uint64_t nosched_tail:11;
  988. /*
  989. * set when there is one or more descheduled heads on
  990. * the descheduled list selected by qosgrp.
  991. */
  992. uint64_t des_val:1;
  993. /*
  994. * set when there is exactly one descheduled head on
  995. * the descheduled list selected by qosgrp.
  996. */
  997. uint64_t des_one:1;
  998. uint64_t reserved_23:1;
  999. /*
  1000. * when des_val is set, indicates the first
  1001. * descheduled head on the descheduled list selected
  1002. * by qosgrp.
  1003. */
  1004. uint64_t des_head:11;
  1005. uint64_t reserved_11:1;
  1006. /*
  1007. * when des_val is set, indicates the last descheduled
  1008. * head on the descheduled list selected by qosgrp.
  1009. */
  1010. uint64_t des_tail:11;
  1011. #else
  1012. uint64_t des_tail:11;
  1013. uint64_t reserved_11:1;
  1014. uint64_t des_head:11;
  1015. uint64_t reserved_23:1;
  1016. uint64_t des_one:1;
  1017. uint64_t des_val:1;
  1018. uint64_t nosched_tail:11;
  1019. uint64_t reserved_37:1;
  1020. uint64_t nosched_head:11;
  1021. uint64_t reserved_49:1;
  1022. uint64_t nosched_one:1;
  1023. uint64_t nosched_val:1;
  1024. uint64_t reserved_52_63:12;
  1025. #endif
  1026. } sindexload1;
  1027. /**
  1028. * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
  1029. */
  1030. struct {
  1031. #ifdef __BIG_ENDIAN_BITFIELD
  1032. uint64_t reserved_39_63:25;
  1033. /*
  1034. * Set when this DRAM list is the current head
  1035. * (i.e. is the next to be reloaded when the POW
  1036. * hardware reloads a POW entry from DRAM). The POW
  1037. * hardware alternates between the two DRAM lists
  1038. * associated with a QOS level when it reloads work
  1039. * from DRAM into the POW unit.
  1040. */
  1041. uint64_t rmt_is_head:1;
  1042. /*
  1043. * Set when the DRAM portion of the input Q list
  1044. * selected by qosgrp contains one or more pieces of
  1045. * work.
  1046. */
  1047. uint64_t rmt_val:1;
  1048. /*
  1049. * Set when the DRAM portion of the input Q list
  1050. * selected by qosgrp contains exactly one piece of
  1051. * work.
  1052. */
  1053. uint64_t rmt_one:1;
  1054. /*
  1055. * When rmt_val is set, indicates the first piece of
  1056. * work on the DRAM input Q list selected by
  1057. * qosgrp.
  1058. */
  1059. uint64_t rmt_head:36;
  1060. #else
  1061. uint64_t rmt_head:36;
  1062. uint64_t rmt_one:1;
  1063. uint64_t rmt_val:1;
  1064. uint64_t rmt_is_head:1;
  1065. uint64_t reserved_39_63:25;
  1066. #endif
  1067. } sindexload2;
  1068. /**
  1069. * Result For POW Index/Pointer Load (get_rmt ==
  1070. * 1/get_des_get_tail == 1)
  1071. */
  1072. struct {
  1073. #ifdef __BIG_ENDIAN_BITFIELD
  1074. uint64_t reserved_39_63:25;
  1075. /*
  1076. * set when this DRAM list is the current head
  1077. * (i.e. is the next to be reloaded when the POW
  1078. * hardware reloads a POW entry from DRAM). The POW
  1079. * hardware alternates between the two DRAM lists
  1080. * associated with a QOS level when it reloads work
  1081. * from DRAM into the POW unit.
  1082. */
  1083. uint64_t rmt_is_head:1;
  1084. /*
  1085. * set when the DRAM portion of the input Q list
  1086. * selected by qosgrp contains one or more pieces of
  1087. * work.
  1088. */
  1089. uint64_t rmt_val:1;
  1090. /*
  1091. * set when the DRAM portion of the input Q list
  1092. * selected by qosgrp contains exactly one piece of
  1093. * work.
  1094. */
  1095. uint64_t rmt_one:1;
  1096. /*
  1097. * when rmt_val is set, indicates the last piece of
  1098. * work on the DRAM input Q list selected by
  1099. * qosgrp.
  1100. */
  1101. uint64_t rmt_tail:36;
  1102. #else
  1103. uint64_t rmt_tail:36;
  1104. uint64_t rmt_one:1;
  1105. uint64_t rmt_val:1;
  1106. uint64_t rmt_is_head:1;
  1107. uint64_t reserved_39_63:25;
  1108. #endif
  1109. } sindexload3;
  1110. /**
  1111. * Response to NULL_RD request loads
  1112. */
  1113. struct {
  1114. #ifdef __BIG_ENDIAN_BITFIELD
  1115. uint64_t unused:62;
  1116. /* of type cvmx_pow_tag_type_t. state is one of the
  1117. * following:
  1118. *
  1119. * - CVMX_POW_TAG_TYPE_ORDERED
  1120. * - CVMX_POW_TAG_TYPE_ATOMIC
  1121. * - CVMX_POW_TAG_TYPE_NULL
  1122. * - CVMX_POW_TAG_TYPE_NULL_NULL
  1123. */
  1124. uint64_t state:2;
  1125. #else
  1126. uint64_t state:2;
  1127. uint64_t unused:62;
  1128. #endif
  1129. } s_null_rd;
  1130. } cvmx_pow_tag_load_resp_t;
  1131. /**
  1132. * This structure describes the address used for stores to the POW.
  1133. * The store address is meaningful on stores to the POW. The
  1134. * hardware assumes that an aligned 64-bit store was used for all
  1135. * these stores. Note the assumption that the work queue entry is
  1136. * aligned on an 8-byte boundary (since the low-order 3 address bits
  1137. * must be zero). Note that not all fields are used by all
  1138. * operations.
  1139. *
  1140. * NOTE: The following is the behavior of the pending switch bit at the PP
  1141. * for POW stores (i.e. when did<7:3> == 0xc)
 * - did<2:0> == 0 => pending switch bit is set
 * - did<2:0> == 1 => no effect on the pending switch bit
 * - did<2:0> == 3 => pending switch bit is cleared
 * - did<2:0> == 7 => no effect on the pending switch bit
 * - did<2:0> == others => must not be used
 * - No other loads/stores have an effect on the pending switch bit
  1148. * - The switch bus from POW can clear the pending switch bit
  1149. *
 * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle
 * ADDWQ command that only contains the pointer. SW must never use
 * did<2:0> == 2.
  1153. */
typedef union {
	/**
	 * Unsigned 64 bit integer representation of store address
	 */
	uint64_t u64;
	/* Bitfield view of the 64-bit physical store address. */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/* Memory region. Should be CVMX_IO_SEG in most cases */
		uint64_t mem_reg:2;
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t is_io:1;	/* Must be one */
		/* Device ID of POW. Note that different sub-dids are used. */
		uint64_t did:8;
		uint64_t reserved_36_39:4;	/* Must be zero */
		/* Address field. addr<2:0> must be zero */
		uint64_t addr:36;
#else
		/* Little-endian layout: identical fields in reverse order. */
		uint64_t addr:36;
		uint64_t reserved_36_39:4;
		uint64_t did:8;
		uint64_t is_io:1;
		uint64_t reserved_49_61:13;
		uint64_t mem_reg:2;
#endif
	} stag;
} cvmx_pow_tag_store_addr_t;
/**
 * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
 */
typedef union {
	/* Raw 64-bit IOBDMA command word. */
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		uint64_t unused:36;
		/* if set, don't return load response until work is available */
		uint64_t wait:1;
		uint64_t unused2:3;
#else
		/* Little-endian layout: identical fields in reverse order. */
		uint64_t unused2:3;
		uint64_t wait:1;
		uint64_t unused:36;
		uint64_t did:8;
		uint64_t len:8;
		uint64_t scraddr:8;
#endif
	} s;
} cvmx_pow_iobdma_store_t;
  1210. /* CSR typedefs have been moved to cvmx-csr-*.h */
  1211. /**
  1212. * Get the POW tag for this core. This returns the current
  1213. * tag type, tag, group, and POW entry index associated with
  1214. * this core. Index is only valid if the tag type isn't NULL_NULL.
  1215. * If a tag switch is pending this routine returns the tag before
  1216. * the tag switch, not after.
  1217. *
  1218. * Returns Current tag
  1219. */
  1220. static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
  1221. {
  1222. cvmx_pow_load_addr_t load_addr;
  1223. cvmx_pow_tag_load_resp_t load_resp;
  1224. cvmx_pow_tag_req_t result;
  1225. load_addr.u64 = 0;
  1226. load_addr.sstatus.mem_region = CVMX_IO_SEG;
  1227. load_addr.sstatus.is_io = 1;
  1228. load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
  1229. load_addr.sstatus.coreid = cvmx_get_core_num();
  1230. load_addr.sstatus.get_cur = 1;
  1231. load_resp.u64 = cvmx_read_csr(load_addr.u64);
  1232. result.u64 = 0;
  1233. result.s.grp = load_resp.s_sstatus2.grp;
  1234. result.s.index = load_resp.s_sstatus2.index;
  1235. result.s.type = load_resp.s_sstatus2.tag_type;
  1236. result.s.tag = load_resp.s_sstatus2.tag;
  1237. return result;
  1238. }
  1239. /**
  1240. * Get the POW WQE for this core. This returns the work queue
  1241. * entry currently associated with this core.
  1242. *
  1243. * Returns WQE pointer
  1244. */
  1245. static inline struct cvmx_wqe *cvmx_pow_get_current_wqp(void)
  1246. {
  1247. cvmx_pow_load_addr_t load_addr;
  1248. cvmx_pow_tag_load_resp_t load_resp;
  1249. load_addr.u64 = 0;
  1250. load_addr.sstatus.mem_region = CVMX_IO_SEG;
  1251. load_addr.sstatus.is_io = 1;
  1252. load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
  1253. load_addr.sstatus.coreid = cvmx_get_core_num();
  1254. load_addr.sstatus.get_cur = 1;
  1255. load_addr.sstatus.get_wqp = 1;
  1256. load_resp.u64 = cvmx_read_csr(load_addr.u64);
  1257. return (struct cvmx_wqe *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
  1258. }
#ifndef CVMX_MF_CHORD
/*
 * Read hardware register 30 into dest. Callers below (see
 * cvmx_pow_tag_sw_wait) treat a nonzero value as "the last tag
 * switch has completed".
 */
#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
#endif
  1262. /**
  1263. * Print a warning if a tag switch is pending for this core
  1264. *
  1265. * @function: Function name checking for a pending tag switch
  1266. */
  1267. static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
  1268. {
  1269. uint64_t switch_complete;
  1270. CVMX_MF_CHORD(switch_complete);
  1271. if (!switch_complete)
  1272. pr_warn("%s called with tag switch in progress\n", function);
  1273. }
/**
 * Waits for a tag switch to complete by polling the completion bit.
 * Note that switches to NULL complete immediately and do not need
 * to be waited for.
 */
static inline void cvmx_pow_tag_sw_wait(void)
{
	const uint64_t MAX_CYCLES = 1ull << 31;
	uint64_t switch_complete;
	uint64_t start_cycle = cvmx_get_cycle();
	while (1) {
		CVMX_MF_CHORD(switch_complete);
		if (unlikely(switch_complete))
			break;
		if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
			pr_warn("Tag switch is taking a long time, possible deadlock\n");
			/*
			 * Deliberate unsigned wrap: after this,
			 * start_cycle + MAX_CYCLES == UINT64_MAX, so
			 * the deadline test can never fire again and
			 * the warning prints at most once.
			 */
			start_cycle = -MAX_CYCLES - 1;
		}
	}
}
  1294. /**
  1295. * Synchronous work request. Requests work from the POW.
  1296. * This function does NOT wait for previous tag switches to complete,
  1297. * so the caller must ensure that there is not a pending tag switch.
  1298. *
  1299. * @wait: When set, call stalls until work becomes avaiable, or times out.
  1300. * If not set, returns immediately.
  1301. *
  1302. * Returns: the WQE pointer from POW. Returns NULL if no work
  1303. * was available.
  1304. */
  1305. static inline struct cvmx_wqe *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
  1306. wait)
  1307. {
  1308. cvmx_pow_load_addr_t ptr;
  1309. cvmx_pow_tag_load_resp_t result;
  1310. if (CVMX_ENABLE_POW_CHECKS)
  1311. __cvmx_pow_warn_if_pending_switch(__func__);
  1312. ptr.u64 = 0;
  1313. ptr.swork.mem_region = CVMX_IO_SEG;
  1314. ptr.swork.is_io = 1;
  1315. ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
  1316. ptr.swork.wait = wait;
  1317. result.u64 = cvmx_read_csr(ptr.u64);
  1318. if (result.s_work.no_work)
  1319. return NULL;
  1320. else
  1321. return (struct cvmx_wqe *) cvmx_phys_to_ptr(result.s_work.addr);
  1322. }
  1323. /**
  1324. * Synchronous work request. Requests work from the POW.
  1325. * This function waits for any previous tag switch to complete before
  1326. * requesting the new work.
  1327. *
  1328. * @wait: When set, call stalls until work becomes avaiable, or times out.
  1329. * If not set, returns immediately.
  1330. *
  1331. * Returns: the WQE pointer from POW. Returns NULL if no work
  1332. * was available.
  1333. */
  1334. static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
  1335. {
  1336. if (CVMX_ENABLE_POW_CHECKS)
  1337. __cvmx_pow_warn_if_pending_switch(__func__);
  1338. /* Must not have a switch pending when requesting work */
  1339. cvmx_pow_tag_sw_wait();
  1340. return cvmx_pow_work_request_sync_nocheck(wait);
  1341. }
  1342. /**
  1343. * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
  1344. * This function waits for any previous tag switch to complete before
  1345. * requesting the null_rd.
  1346. *
  1347. * Returns: the POW state of type cvmx_pow_tag_type_t.
  1348. */
  1349. static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
  1350. {
  1351. cvmx_pow_load_addr_t ptr;
  1352. cvmx_pow_tag_load_resp_t result;
  1353. if (CVMX_ENABLE_POW_CHECKS)
  1354. __cvmx_pow_warn_if_pending_switch(__func__);
  1355. /* Must not have a switch pending when requesting work */
  1356. cvmx_pow_tag_sw_wait();
  1357. ptr.u64 = 0;
  1358. ptr.snull_rd.mem_region = CVMX_IO_SEG;
  1359. ptr.snull_rd.is_io = 1;
  1360. ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
  1361. result.u64 = cvmx_read_csr(ptr.u64);
  1362. return (enum cvmx_pow_tag_type) result.s_null_rd.state;
  1363. }
  1364. /**
  1365. * Asynchronous work request. Work is requested from the POW unit,
  1366. * and should later be checked with function
  1367. * cvmx_pow_work_response_async. This function does NOT wait for
  1368. * previous tag switches to complete, so the caller must ensure that
  1369. * there is not a pending tag switch.
  1370. *
  1371. * @scr_addr: Scratch memory address that response will be returned
  1372. * to, which is either a valid WQE, or a response with the
  1373. * invalid bit set. Byte address, must be 8 byte aligned.
  1374. *
  1375. * @wait: 1 to cause response to wait for work to become available (or
  1376. * timeout), 0 to cause response to return immediately
  1377. */
  1378. static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
  1379. cvmx_pow_wait_t wait)
  1380. {
  1381. cvmx_pow_iobdma_store_t data;
  1382. if (CVMX_ENABLE_POW_CHECKS)
  1383. __cvmx_pow_warn_if_pending_switch(__func__);
  1384. /* scr_addr must be 8 byte aligned */
  1385. data.s.scraddr = scr_addr >> 3;
  1386. data.s.len = 1;
  1387. data.s.did = CVMX_OCT_DID_TAG_SWTAG;
  1388. data.s.wait = wait;
  1389. cvmx_send_single(data.u64);
  1390. }
  1391. /**
  1392. * Asynchronous work request. Work is requested from the POW unit,
  1393. * and should later be checked with function
  1394. * cvmx_pow_work_response_async. This function waits for any previous
  1395. * tag switch to complete before requesting the new work.
  1396. *
  1397. * @scr_addr: Scratch memory address that response will be returned
  1398. * to, which is either a valid WQE, or a response with the
  1399. * invalid bit set. Byte address, must be 8 byte aligned.
  1400. *
  1401. * @wait: 1 to cause response to wait for work to become available (or
  1402. * timeout), 0 to cause response to return immediately
  1403. */
  1404. static inline void cvmx_pow_work_request_async(int scr_addr,
  1405. cvmx_pow_wait_t wait)
  1406. {
  1407. if (CVMX_ENABLE_POW_CHECKS)
  1408. __cvmx_pow_warn_if_pending_switch(__func__);
  1409. /* Must not have a switch pending when requesting work */
  1410. cvmx_pow_tag_sw_wait();
  1411. cvmx_pow_work_request_async_nocheck(scr_addr, wait);
  1412. }
  1413. /**
  1414. * Gets result of asynchronous work request. Performs a IOBDMA sync
  1415. * to wait for the response.
  1416. *
  1417. * @scr_addr: Scratch memory address to get result from Byte address,
  1418. * must be 8 byte aligned.
  1419. *
  1420. * Returns: the WQE from the scratch register, or NULL if no
  1421. * work was available.
  1422. */
  1423. static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
  1424. {
  1425. cvmx_pow_tag_load_resp_t result;
  1426. CVMX_SYNCIOBDMA;
  1427. result.u64 = cvmx_scratch_read64(scr_addr);
  1428. if (result.s_work.no_work)
  1429. return NULL;
  1430. else
  1431. return (struct cvmx_wqe *) cvmx_phys_to_ptr(result.s_work.addr);
  1432. }
  1433. /**
  1434. * Checks if a work queue entry pointer returned by a work
  1435. * request is valid. It may be invalid due to no work
  1436. * being available or due to a timeout.
  1437. *
  1438. * @wqe_ptr: pointer to a work queue entry returned by the POW
  1439. *
  1440. * Returns 0 if pointer is valid
  1441. * 1 if invalid (no work was returned)
  1442. */
  1443. static inline uint64_t cvmx_pow_work_invalid(struct cvmx_wqe *wqe_ptr)
  1444. {
  1445. return wqe_ptr == NULL;
  1446. }
  1447. /**
  1448. * Starts a tag switch to the provided tag value and tag type.
  1449. * Completion for the tag switch must be checked for separately. This
  1450. * function does NOT update the work queue entry in dram to match tag
  1451. * value and type, so the application must keep track of these if they
  1452. * are important to the application. This tag switch command must not
  1453. * be used for switches to NULL, as the tag switch pending bit will be
  1454. * set by the switch request, but never cleared by the hardware.
  1455. *
  1456. * NOTE: This should not be used when switching from a NULL tag. Use
  1457. * cvmx_pow_tag_sw_full() instead.
  1458. *
  1459. * This function does no checks, so the caller must ensure that any
  1460. * previous tag switch has completed.
  1461. *
  1462. * @tag: new tag value
  1463. * @tag_type: new tag type (ordered or atomic)
  1464. */
  1465. static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
  1466. enum cvmx_pow_tag_type tag_type)
  1467. {
  1468. cvmx_addr_t ptr;
  1469. cvmx_pow_tag_req_t tag_req;
  1470. if (CVMX_ENABLE_POW_CHECKS) {
  1471. cvmx_pow_tag_req_t current_tag;
  1472. __cvmx_pow_warn_if_pending_switch(__func__);
  1473. current_tag = cvmx_pow_get_current_tag();
  1474. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
  1475. pr_warn("%s called with NULL_NULL tag\n", __func__);
  1476. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
  1477. pr_warn("%s called with NULL tag\n", __func__);
  1478. if ((current_tag.s.type == tag_type)
  1479. && (current_tag.s.tag == tag))
  1480. pr_warn("%s called to perform a tag switch to the same tag\n",
  1481. __func__);
  1482. if (tag_type == CVMX_POW_TAG_TYPE_NULL)
  1483. pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
  1484. __func__);
  1485. }
  1486. /*
  1487. * Note that WQE in DRAM is not updated here, as the POW does
  1488. * not read from DRAM once the WQE is in flight. See hardware
  1489. * manual for complete details. It is the application's
  1490. * responsibility to keep track of the current tag value if
  1491. * that is important.
  1492. */
  1493. tag_req.u64 = 0;
  1494. tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
  1495. tag_req.s.tag = tag;
  1496. tag_req.s.type = tag_type;
  1497. ptr.u64 = 0;
  1498. ptr.sio.mem_region = CVMX_IO_SEG;
  1499. ptr.sio.is_io = 1;
  1500. ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
  1501. /* once this store arrives at POW, it will attempt the switch
  1502. software must wait for the switch to complete separately */
  1503. cvmx_write_io(ptr.u64, tag_req.u64);
  1504. }
  1505. /**
  1506. * Starts a tag switch to the provided tag value and tag type.
  1507. * Completion for the tag switch must be checked for separately. This
  1508. * function does NOT update the work queue entry in dram to match tag
  1509. * value and type, so the application must keep track of these if they
  1510. * are important to the application. This tag switch command must not
  1511. * be used for switches to NULL, as the tag switch pending bit will be
  1512. * set by the switch request, but never cleared by the hardware.
  1513. *
  1514. * NOTE: This should not be used when switching from a NULL tag. Use
  1515. * cvmx_pow_tag_sw_full() instead.
  1516. *
  1517. * This function waits for any previous tag switch to complete, and also
  1518. * displays an error on tag switches to NULL.
  1519. *
  1520. * @tag: new tag value
  1521. * @tag_type: new tag type (ordered or atomic)
  1522. */
  1523. static inline void cvmx_pow_tag_sw(uint32_t tag,
  1524. enum cvmx_pow_tag_type tag_type)
  1525. {
  1526. if (CVMX_ENABLE_POW_CHECKS)
  1527. __cvmx_pow_warn_if_pending_switch(__func__);
  1528. /*
  1529. * Note that WQE in DRAM is not updated here, as the POW does
  1530. * not read from DRAM once the WQE is in flight. See hardware
  1531. * manual for complete details. It is the application's
  1532. * responsibility to keep track of the current tag value if
  1533. * that is important.
  1534. */
  1535. /*
  1536. * Ensure that there is not a pending tag switch, as a tag
  1537. * switch cannot be started if a previous switch is still
  1538. * pending.
  1539. */
  1540. cvmx_pow_tag_sw_wait();
  1541. cvmx_pow_tag_sw_nocheck(tag, tag_type);
  1542. }
  1543. /**
  1544. * Starts a tag switch to the provided tag value and tag type.
  1545. * Completion for the tag switch must be checked for separately. This
  1546. * function does NOT update the work queue entry in dram to match tag
  1547. * value and type, so the application must keep track of these if they
  1548. * are important to the application. This tag switch command must not
  1549. * be used for switches to NULL, as the tag switch pending bit will be
  1550. * set by the switch request, but never cleared by the hardware.
  1551. *
  1552. * This function must be used for tag switches from NULL.
  1553. *
  1554. * This function does no checks, so the caller must ensure that any
  1555. * previous tag switch has completed.
  1556. *
  1557. * @wqp: pointer to work queue entry to submit. This entry is
  1558. * updated to match the other parameters
  1559. * @tag: tag value to be assigned to work queue entry
  1560. * @tag_type: type of tag
  1561. * @group: group value for the work queue entry.
  1562. */
  1563. static inline void cvmx_pow_tag_sw_full_nocheck(struct cvmx_wqe *wqp, uint32_t tag,
  1564. enum cvmx_pow_tag_type tag_type,
  1565. uint64_t group)
  1566. {
  1567. cvmx_addr_t ptr;
  1568. cvmx_pow_tag_req_t tag_req;
  1569. if (CVMX_ENABLE_POW_CHECKS) {
  1570. cvmx_pow_tag_req_t current_tag;
  1571. __cvmx_pow_warn_if_pending_switch(__func__);
  1572. current_tag = cvmx_pow_get_current_tag();
  1573. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
  1574. pr_warn("%s called with NULL_NULL tag\n", __func__);
  1575. if ((current_tag.s.type == tag_type)
  1576. && (current_tag.s.tag == tag))
  1577. pr_warn("%s called to perform a tag switch to the same tag\n",
  1578. __func__);
  1579. if (tag_type == CVMX_POW_TAG_TYPE_NULL)
  1580. pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
  1581. __func__);
  1582. if (wqp != cvmx_phys_to_ptr(0x80))
  1583. if (wqp != cvmx_pow_get_current_wqp())
  1584. pr_warn("%s passed WQE(%p) doesn't match the address in the POW(%p)\n",
  1585. __func__, wqp,
  1586. cvmx_pow_get_current_wqp());
  1587. }
  1588. /*
  1589. * Note that WQE in DRAM is not updated here, as the POW does
  1590. * not read from DRAM once the WQE is in flight. See hardware
  1591. * manual for complete details. It is the application's
  1592. * responsibility to keep track of the current tag value if
  1593. * that is important.
  1594. */
  1595. tag_req.u64 = 0;
  1596. tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
  1597. tag_req.s.tag = tag;
  1598. tag_req.s.type = tag_type;
  1599. tag_req.s.grp = group;
  1600. ptr.u64 = 0;
  1601. ptr.sio.mem_region = CVMX_IO_SEG;
  1602. ptr.sio.is_io = 1;
  1603. ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
  1604. ptr.sio.offset = CAST64(wqp);
  1605. /*
  1606. * once this store arrives at POW, it will attempt the switch
  1607. * software must wait for the switch to complete separately.
  1608. */
  1609. cvmx_write_io(ptr.u64, tag_req.u64);
  1610. }
/**
 * Starts a tag switch to the provided tag value and tag type.
 * Completion for the tag switch must be checked for separately. This
 * function does NOT update the work queue entry in dram to match tag
 * value and type, so the application must keep track of these if they
 * are important to the application. This tag switch command must not
 * be used for switches to NULL, as the tag switch pending bit will be
 * set by the switch request, but never cleared by the hardware.
 *
 * This function must be used for tag switches from NULL.
 *
 * This function waits for any pending tag switches to complete
 * before requesting the tag switch.
 *
 * @wqp: pointer to work queue entry to submit. This entry is updated
 * to match the other parameters
 * @tag: tag value to be assigned to work queue entry
 * @tag_type: type of tag
 * @group: group value for the work queue entry.
 */
static inline void cvmx_pow_tag_sw_full(struct cvmx_wqe *wqp, uint32_t tag,
					enum cvmx_pow_tag_type tag_type,
					uint64_t group)
{
	/* Optional debug aid: warn if the caller races a pending switch. */
	if (CVMX_ENABLE_POW_CHECKS)
		__cvmx_pow_warn_if_pending_switch(__func__);
	/*
	 * Ensure that there is not a pending tag switch, as a tag
	 * switch cannot be started if a previous switch is still
	 * pending.
	 */
	cvmx_pow_tag_sw_wait();
	/* The _nocheck variant issues the actual SWTAG_FULL command. */
	cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
}
  1645. /**
  1646. * Switch to a NULL tag, which ends any ordering or
  1647. * synchronization provided by the POW for the current
  1648. * work queue entry. This operation completes immediately,
  1649. * so completion should not be waited for.
  1650. * This function does NOT wait for previous tag switches to complete,
  1651. * so the caller must ensure that any previous tag switches have completed.
  1652. */
  1653. static inline void cvmx_pow_tag_sw_null_nocheck(void)
  1654. {
  1655. cvmx_addr_t ptr;
  1656. cvmx_pow_tag_req_t tag_req;
  1657. if (CVMX_ENABLE_POW_CHECKS) {
  1658. cvmx_pow_tag_req_t current_tag;
  1659. __cvmx_pow_warn_if_pending_switch(__func__);
  1660. current_tag = cvmx_pow_get_current_tag();
  1661. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
  1662. pr_warn("%s called with NULL_NULL tag\n", __func__);
  1663. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
  1664. pr_warn("%s called when we already have a NULL tag\n",
  1665. __func__);
  1666. }
  1667. tag_req.u64 = 0;
  1668. tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
  1669. tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;
  1670. ptr.u64 = 0;
  1671. ptr.sio.mem_region = CVMX_IO_SEG;
  1672. ptr.sio.is_io = 1;
  1673. ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
  1674. cvmx_write_io(ptr.u64, tag_req.u64);
  1675. /* switch to NULL completes immediately */
  1676. }
/**
 * Switch to a NULL tag, which ends any ordering or
 * synchronization provided by the POW for the current
 * work queue entry. This operation completes immediately,
 * so completion should not be waited for.
 * This function waits for any pending tag switches to complete
 * before requesting the switch to NULL.
 */
static inline void cvmx_pow_tag_sw_null(void)
{
	/* Optional debug aid: warn if the caller races a pending switch. */
	if (CVMX_ENABLE_POW_CHECKS)
		__cvmx_pow_warn_if_pending_switch(__func__);
	/*
	 * Ensure that there is not a pending tag switch, as a tag
	 * switch cannot be started if a previous switch is still
	 * pending.
	 */
	cvmx_pow_tag_sw_wait();
	/* The _nocheck variant issues the actual SWTAG-to-NULL command. */
	cvmx_pow_tag_sw_null_nocheck();
	/* switch to NULL completes immediately */
}
  1698. /**
  1699. * Submits work to an input queue. This function updates the work
  1700. * queue entry in DRAM to match the arguments given. Note that the
  1701. * tag provided is for the work queue entry submitted, and is
  1702. * unrelated to the tag that the core currently holds.
  1703. *
  1704. * @wqp: pointer to work queue entry to submit. This entry is
  1705. * updated to match the other parameters
  1706. * @tag: tag value to be assigned to work queue entry
  1707. * @tag_type: type of tag
  1708. * @qos: Input queue to add to.
  1709. * @grp: group value for the work queue entry.
  1710. */
  1711. static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
  1712. enum cvmx_pow_tag_type tag_type,
  1713. uint64_t qos, uint64_t grp)
  1714. {
  1715. cvmx_addr_t ptr;
  1716. cvmx_pow_tag_req_t tag_req;
  1717. wqp->word1.tag = tag;
  1718. wqp->word1.tag_type = tag_type;
  1719. cvmx_wqe_set_qos(wqp, qos);
  1720. cvmx_wqe_set_grp(wqp, grp);
  1721. tag_req.u64 = 0;
  1722. tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
  1723. tag_req.s.type = tag_type;
  1724. tag_req.s.tag = tag;
  1725. tag_req.s.qos = qos;
  1726. tag_req.s.grp = grp;
  1727. ptr.u64 = 0;
  1728. ptr.sio.mem_region = CVMX_IO_SEG;
  1729. ptr.sio.is_io = 1;
  1730. ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
  1731. ptr.sio.offset = cvmx_ptr_to_phys(wqp);
  1732. /*
  1733. * SYNC write to memory before the work submit. This is
  1734. * necessary as POW may read values from DRAM at this time.
  1735. */
  1736. CVMX_SYNCWS;
  1737. cvmx_write_io(ptr.u64, tag_req.u64);
  1738. }
  1739. /**
  1740. * This function sets the group mask for a core. The group mask
  1741. * indicates which groups each core will accept work from. There are
  1742. * 16 groups.
  1743. *
  1744. * @core_num: core to apply mask to
  1745. * @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
  1746. * representing groups 0-15.
  1747. * Each 1 bit in the mask enables the core to accept work from
  1748. * the corresponding group.
  1749. */
  1750. static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
  1751. {
  1752. union cvmx_pow_pp_grp_mskx grp_msk;
  1753. grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
  1754. grp_msk.s.grp_msk = mask;
  1755. cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
  1756. }
  1757. /**
  1758. * This function sets POW static priorities for a core. Each input queue has
  1759. * an associated priority value.
  1760. *
  1761. * @core_num: core to apply priorities to
  1762. * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
  1763. * Highest priority is 0 and lowest is 7. A priority value
  1764. * of 0xF instructs POW to skip the Input Queue when
  1765. * scheduling to this specific core.
  1766. * NOTE: priorities should not have gaps in values, meaning
  1767. * {0,1,1,1,1,1,1,1} is a valid configuration while
  1768. * {0,2,2,2,2,2,2,2} is not.
  1769. */
  1770. static inline void cvmx_pow_set_priority(uint64_t core_num,
  1771. const uint8_t priority[])
  1772. {
  1773. /* POW priorities are supported on CN5xxx and later */
  1774. if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
  1775. union cvmx_pow_pp_grp_mskx grp_msk;
  1776. grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
  1777. grp_msk.s.qos0_pri = priority[0];
  1778. grp_msk.s.qos1_pri = priority[1];
  1779. grp_msk.s.qos2_pri = priority[2];
  1780. grp_msk.s.qos3_pri = priority[3];
  1781. grp_msk.s.qos4_pri = priority[4];
  1782. grp_msk.s.qos5_pri = priority[5];
  1783. grp_msk.s.qos6_pri = priority[6];
  1784. grp_msk.s.qos7_pri = priority[7];
  1785. /* Detect gaps between priorities and flag error */
  1786. {
  1787. int i;
  1788. uint32_t prio_mask = 0;
  1789. for (i = 0; i < 8; i++)
  1790. if (priority[i] != 0xF)
  1791. prio_mask |= 1 << priority[i];
  1792. if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
  1793. pr_err("POW static priorities should be "
  1794. "contiguous (0x%llx)\n",
  1795. (unsigned long long)prio_mask);
  1796. return;
  1797. }
  1798. }
  1799. cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
  1800. }
  1801. }
  1802. /**
  1803. * Performs a tag switch and then an immediate deschedule. This completes
  1804. * immediately, so completion must not be waited for. This function does NOT
  1805. * update the wqe in DRAM to match arguments.
  1806. *
  1807. * This function does NOT wait for any prior tag switches to complete, so the
  1808. * calling code must do this.
  1809. *
  1810. * Note the following CAVEAT of the Octeon HW behavior when
  1811. * re-scheduling DE-SCHEDULEd items whose (next) state is
  1812. * ORDERED:
  1813. * - If there are no switches pending at the time that the
  1814. * HW executes the de-schedule, the HW will only re-schedule
  1815. * the head of the FIFO associated with the given tag. This
  1816. * means that in many respects, the HW treats this ORDERED
  1817. * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
  1818. * case (to an ORDERED tag), the HW will do the switch
  1819. * before the deschedule whenever it is possible to do
  1820. * the switch immediately, so it may often look like
  1821. * this case.
  1822. * - If there is a pending switch to ORDERED at the time
  1823. * the HW executes the de-schedule, the HW will perform
  1824. * the switch at the time it re-schedules, and will be
  1825. * able to reschedule any/all of the entries with the
  1826. * same tag.
  1827. * Due to this behavior, the RECOMMENDATION to software is
  1828. * that they have a (next) state of ATOMIC when they
  1829. * DE-SCHEDULE. If an ORDERED tag is what was really desired,
  1830. * SW can choose to immediately switch to an ORDERED tag
  1831. * after the work (that has an ATOMIC tag) is re-scheduled.
  1832. * Note that since there are never any tag switches pending
  1833. * when the HW re-schedules, this switch can be IMMEDIATE upon
  1834. * the reception of the pointer during the re-schedule.
  1835. *
  1836. * @tag: New tag value
  1837. * @tag_type: New tag type
  1838. * @group: New group value
  1839. * @no_sched: Control whether this work queue entry will be rescheduled.
  1840. * - 1 : don't schedule this work
  1841. * - 0 : allow this work to be scheduled.
  1842. */
  1843. static inline void cvmx_pow_tag_sw_desched_nocheck(
  1844. uint32_t tag,
  1845. enum cvmx_pow_tag_type tag_type,
  1846. uint64_t group,
  1847. uint64_t no_sched)
  1848. {
  1849. cvmx_addr_t ptr;
  1850. cvmx_pow_tag_req_t tag_req;
  1851. if (CVMX_ENABLE_POW_CHECKS) {
  1852. cvmx_pow_tag_req_t current_tag;
  1853. __cvmx_pow_warn_if_pending_switch(__func__);
  1854. current_tag = cvmx_pow_get_current_tag();
  1855. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
  1856. pr_warn("%s called with NULL_NULL tag\n", __func__);
  1857. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
  1858. pr_warn("%s called with NULL tag. Deschedule not allowed from NULL state\n",
  1859. __func__);
  1860. if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
  1861. && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
  1862. pr_warn("%s called where neither the before or after tag is ATOMIC\n",
  1863. __func__);
  1864. }
  1865. tag_req.u64 = 0;
  1866. tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
  1867. tag_req.s.tag = tag;
  1868. tag_req.s.type = tag_type;
  1869. tag_req.s.grp = group;
  1870. tag_req.s.no_sched = no_sched;
  1871. ptr.u64 = 0;
  1872. ptr.sio.mem_region = CVMX_IO_SEG;
  1873. ptr.sio.is_io = 1;
  1874. ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
  1875. /*
  1876. * since TAG3 is used, this store will clear the local pending
  1877. * switch bit.
  1878. */
  1879. cvmx_write_io(ptr.u64, tag_req.u64);
  1880. }
/**
 * Performs a tag switch and then an immediate deschedule. This completes
 * immediately, so completion must not be waited for. This function does NOT
 * update the wqe in DRAM to match arguments.
 *
 * This function waits for any prior tag switches to complete, so the
 * calling code may call this function with a pending tag switch.
 *
 * Note the following CAVEAT of the Octeon HW behavior when
 * re-scheduling DE-SCHEDULEd items whose (next) state is
 * ORDERED:
 * - If there are no switches pending at the time that the
 * HW executes the de-schedule, the HW will only re-schedule
 * the head of the FIFO associated with the given tag. This
 * means that in many respects, the HW treats this ORDERED
 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
 * case (to an ORDERED tag), the HW will do the switch
 * before the deschedule whenever it is possible to do
 * the switch immediately, so it may often look like
 * this case.
 * - If there is a pending switch to ORDERED at the time
 * the HW executes the de-schedule, the HW will perform
 * the switch at the time it re-schedules, and will be
 * able to reschedule any/all of the entries with the
 * same tag.
 * Due to this behavior, the RECOMMENDATION to software is
 * that they have a (next) state of ATOMIC when they
 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
 * SW can choose to immediately switch to an ORDERED tag
 * after the work (that has an ATOMIC tag) is re-scheduled.
 * Note that since there are never any tag switches pending
 * when the HW re-schedules, this switch can be IMMEDIATE upon
 * the reception of the pointer during the re-schedule.
 *
 * @tag: New tag value
 * @tag_type: New tag type
 * @group: New group value
 * @no_sched: Control whether this work queue entry will be rescheduled.
 * - 1 : don't schedule this work
 * - 0 : allow this work to be scheduled.
 */
static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
					   enum cvmx_pow_tag_type tag_type,
					   uint64_t group, uint64_t no_sched)
{
	/* Optional debug aid: warn if the caller races a pending switch. */
	if (CVMX_ENABLE_POW_CHECKS)
		__cvmx_pow_warn_if_pending_switch(__func__);
	/* Need to make sure any writes to the work queue entry are complete */
	CVMX_SYNCWS;
	/*
	 * Ensure that there is not a pending tag switch, as a tag
	 * switch cannot be started if a previous switch is still
	 * pending.
	 */
	cvmx_pow_tag_sw_wait();
	/* The _nocheck variant issues the actual SWTAG_DESCH command. */
	cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
}
  1938. /**
  1939. * Deschedules the current work queue entry.
  1940. *
  1941. * @no_sched: no schedule flag value to be set on the work queue
  1942. * entry. If this is set the entry will not be
  1943. * rescheduled.
  1944. */
  1945. static inline void cvmx_pow_desched(uint64_t no_sched)
  1946. {
  1947. cvmx_addr_t ptr;
  1948. cvmx_pow_tag_req_t tag_req;
  1949. if (CVMX_ENABLE_POW_CHECKS) {
  1950. cvmx_pow_tag_req_t current_tag;
  1951. __cvmx_pow_warn_if_pending_switch(__func__);
  1952. current_tag = cvmx_pow_get_current_tag();
  1953. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
  1954. pr_warn("%s called with NULL_NULL tag\n", __func__);
  1955. if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
  1956. pr_warn("%s called with NULL tag. Deschedule not expected from NULL state\n",
  1957. __func__);
  1958. }
  1959. /* Need to make sure any writes to the work queue entry are complete */
  1960. CVMX_SYNCWS;
  1961. tag_req.u64 = 0;
  1962. tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
  1963. tag_req.s.no_sched = no_sched;
  1964. ptr.u64 = 0;
  1965. ptr.sio.mem_region = CVMX_IO_SEG;
  1966. ptr.sio.is_io = 1;
  1967. ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
  1968. /*
  1969. * since TAG3 is used, this store will clear the local pending
  1970. * switch bit.
  1971. */
  1972. cvmx_write_io(ptr.u64, tag_req.u64);
  1973. }
/****************************************************
 * Define usage of bits within the 32 bit tag values.
 *****************************************************/
/*
 * Number of bits of the tag used by software. The SW bits are always
 * a contiguous block of the high bits, starting at bit 31. The
 * hardware bits are always the low bits. By default, the top 8 bits
 * of the tag are reserved for software, and the low 24 are set by the
 * IPD unit.
 */
#define CVMX_TAG_SW_BITS (8)
#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
/* Below is the list of values for the top 8 bits of the tag. */
/*
 * Tag values with top byte of this value are reserved for internal
 * executive uses.
 */
#define CVMX_TAG_SW_BITS_INTERNAL 0x1
/* The executive divides the remaining 24 bits as follows:
 * - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
 *
 * - the lower 16 bits (bits 15 - 0 of the tag) define the value
 * within the subgroup
 *
 * Note that this section describes the format of tags generated by
 * software - refer to the hardware documentation for a description of
 * the tags values generated by the packet input hardware. Subgroups
 * are defined here.
 */
/* Mask for the value portion of the tag */
#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
#define CVMX_TAG_SUBGROUP_SHIFT 16
#define CVMX_TAG_SUBGROUP_PKO 0x1
/* End of executive tag subgroup definitions */
/*
 * The remaining software bit values 0x2 - 0xff are available
 * for application use.
 */
  2012. /**
  2013. * This function creates a 32 bit tag value from the two values provided.
  2014. *
  2015. * @sw_bits: The upper bits (number depends on configuration) are set
  2016. * to this value. The remainder of bits are set by the
  2017. * hw_bits parameter.
  2018. *
  2019. * @hw_bits: The lower bits (number depends on configuration) are set
  2020. * to this value. The remainder of bits are set by the
  2021. * sw_bits parameter.
  2022. *
  2023. * Returns 32 bit value of the combined hw and sw bits.
  2024. */
  2025. static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
  2026. {
  2027. return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) <<
  2028. CVMX_TAG_SW_SHIFT) |
  2029. (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
  2030. }
  2031. /**
  2032. * Extracts the bits allocated for software use from the tag
  2033. *
  2034. * @tag: 32 bit tag value
  2035. *
  2036. * Returns N bit software tag value, where N is configurable with the
  2037. * CVMX_TAG_SW_BITS define
  2038. */
  2039. static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
  2040. {
  2041. return (tag >> (32 - CVMX_TAG_SW_BITS)) &
  2042. cvmx_build_mask(CVMX_TAG_SW_BITS);
  2043. }
  2044. /**
  2045. *
  2046. * Extracts the bits allocated for hardware use from the tag
  2047. *
  2048. * @tag: 32 bit tag value
  2049. *
  2050. * Returns (32 - N) bit software tag value, where N is configurable
  2051. * with the CVMX_TAG_SW_BITS define
  2052. */
  2053. static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
  2054. {
  2055. return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS);
  2056. }
  2057. /**
  2058. * Store the current POW internal state into the supplied
  2059. * buffer. It is recommended that you pass a buffer of at least
  2060. * 128KB. The format of the capture may change based on SDK
  2061. * version and Octeon chip.
  2062. *
  2063. * @buffer: Buffer to store capture into
  2064. * @buffer_size:
  2065. * The size of the supplied buffer
  2066. *
  2067. * Returns Zero on success, negative on failure
  2068. */
  2069. extern int cvmx_pow_capture(void *buffer, int buffer_size);
  2070. /**
  2071. * Dump a POW capture to the console in a human readable format.
  2072. *
  2073. * @buffer: POW capture from cvmx_pow_capture()
  2074. * @buffer_size:
  2075. * Size of the buffer
  2076. */
  2077. extern void cvmx_pow_display(void *buffer, int buffer_size);
  2078. /**
  2079. * Return the number of POW entries supported by this chip
  2080. *
  2081. * Returns Number of POW entries
  2082. */
  2083. extern int cvmx_pow_get_num_entries(void);
  2084. #endif /* __CVMX_POW_H__ */