/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#include <linux/bitops.h>
#include <linux/device.h>

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0		0
#define QMAN_CHANNEL_POOL1		0x21
#define QMAN_CHANNEL_CAAM		0x80
#define QMAN_CHANNEL_POOL1_REV3		0x401
#define QMAN_CHANNEL_CAAM_REV3		0x840
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI	0x00100000	/* Congestion State Change */
#define QM_PIRQ_EQCI	0x00080000	/* Enqueue Command Committed */
#define QM_PIRQ_EQRI	0x00040000	/* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI	0x00020000	/* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI	0x00010000	/* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * ie. that if present should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW	(QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
			 QM_PIRQ_MRI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK	0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n)	(0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
	return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}

/* --- QMan data structures (and associated constants) --- */

/* "Frame Descriptor (FD)" */
struct qm_fd {
	union {
		struct {
			u8 cfg8b_w1;
			u8 bpid;	/* Buffer Pool ID */
			u8 cfg8b_w3;
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			__be32 addr_lo;	/* low 32-bits of 40-bit address */
		} __packed;
		__be64 data;
	};
	__be32 cfg;	/* format, offset, length / congestion */
	union {
		__be32 cmd;
		__be32 status;
	};
} __aligned(8);

#define QM_FD_FORMAT_SG		BIT(31)
#define QM_FD_FORMAT_LONG	BIT(30)
#define QM_FD_FORMAT_COMPOUND	BIT(29)
#define QM_FD_FORMAT_MASK	GENMASK(31, 29)
#define QM_FD_OFF_SHIFT		20
#define QM_FD_OFF_MASK		GENMASK(28, 20)
#define QM_FD_LEN_MASK		GENMASK(19, 0)
#define QM_FD_LEN_BIG_MASK	GENMASK(28, 0)

enum qm_fd_format {
	/*
	 * 'contig' implies a contiguous buffer, whereas 'sg' implies a
	 * scatter-gather table. 'big' implies a 29-bit length with no offset
	 * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
	 * implies a s/g-like table, where each entry itself represents a frame
	 * (contiguous or scatter-gather) and the 29-bit "length" is
	 * interpreted purely for congestion calculations, ie. a "congestion
	 * weight".
	 */
	qm_fd_contig = 0,
	qm_fd_contig_big = QM_FD_FORMAT_LONG,
	qm_fd_sg = QM_FD_FORMAT_SG,
	qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
	qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
	return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
{
	fd->addr_hi = upper_32_bits(addr);
	fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

/*
 * The 'format' field indicates the interpretation of the remaining
 * 29 bits of the 32-bit word.
 * If 'format' is _contig or _sg, 20b length and 9b offset.
 * If 'format' is _contig_big or _sg_big, 29b length.
 * If 'format' is _compound, 29b "congestion weight".
 */
static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
}

static inline int qm_fd_get_offset(const struct qm_fd *fd)
{
	return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
}

static inline int qm_fd_get_length(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
}

static inline int qm_fd_get_len_big(const struct qm_fd *fd)
{
	return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
}

static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
				   int off, int len)
{
	fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
			      ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
}

#define qm_fd_set_contig(fd, off, len) \
	qm_fd_set_param(fd, qm_fd_contig, off, len)
#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
#define qm_fd_set_contig_big(fd, len) \
	qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
#define qm_fd_set_compound(fd, len) qm_fd_set_param(fd, qm_fd_compound, 0, len)

static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
	fd->data = 0;
	fd->cfg = 0;
	fd->cmd = 0;
}
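
/*
 * Illustrative sketch only (not part of the API): populating a frame
 * descriptor for a single contiguous buffer. 'buf_dma', 'hdr_off' and
 * 'frame_len' are hypothetical values supplied by the caller.
 *
 *	struct qm_fd fd;
 *
 *	qm_fd_clear_fd(&fd);
 *	qm_fd_addr_set64(&fd, buf_dma);		// 40-bit buffer address
 *	qm_fd_set_contig(&fd, hdr_off, frame_len); // 9-bit offset, 20-bit length
 */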

/* Scatter/Gather table entry */
struct qm_sg_entry {
	union {
		struct {
			u8 __reserved1[3];
			u8 addr_hi;	/* high 8-bits of 40-bit address */
			__be32 addr_lo;	/* low 32-bits of 40-bit address */
		};
		__be64 data;
	};
	__be32 cfg;	/* E bit, F bit, length */
	u8 __reserved2;
	u8 bpid;
	__be16 offset;	/* 13-bit, _res[13-15] */
} __packed;

#define QM_SG_LEN_MASK	GENMASK(29, 0)
#define QM_SG_OFF_MASK	GENMASK(12, 0)
#define QM_SG_FIN	BIT(30)
#define QM_SG_EXT	BIT(31)

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
	return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
{
	sg->addr_hi = upper_32_bits(addr);
	sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_FIN;
}

static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_EXT;
}

static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
}

static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
{
	sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
}

static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
{
	sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
}

static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
{
	return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
}
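
/*
 * Illustrative sketch only: filling in a two-entry scatter/gather table.
 * 'sgt' points at caller-allocated DMA-able memory; the buffer addresses and
 * lengths are hypothetical.
 *
 *	struct qm_sg_entry *sgt = ...;
 *
 *	memset(sgt, 0, 2 * sizeof(*sgt));	// offset/bpid left at zero
 *	qm_sg_entry_set64(&sgt[0], buf0_dma);
 *	qm_sg_entry_set_len(&sgt[0], buf0_len);
 *	qm_sg_entry_set64(&sgt[1], buf1_dma);
 *	qm_sg_entry_set_f(&sgt[1], buf1_len);	// marks the final entry
 */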

/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
	u8 verb;
	u8 stat;
	__be16 seqnum;	/* 15-bit */
	u8 tok;
	u8 __reserved2[3];
	__be32 fqid;	/* 24-bit */
	__be32 context_b;
	struct qm_fd fd;
	u8 __reserved4[32];
} __packed __aligned(64);

#define QM_DQRR_VERB_VBIT		0x80
#define QM_DQRR_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE	0x60	/* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY		0x80	/* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE	0x40	/* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE	0x20	/* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID		0x10	/* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired */

/* 'fqid' is a 24-bit field in every h/w descriptor */
#define QM_FQID_MASK	GENMASK(23, 0)
#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
#define qm_fqid_get(p)    (be32_to_cpu((p)->fqid) & QM_FQID_MASK)

/* "ERN Message Response" */
/* "FQ State Change Notification" */
union qm_mr_entry {
	struct {
		u8 verb;
		u8 __reserved[63];
	};
	struct {
		u8 verb;
		u8 dca;
		__be16 seqnum;
		u8 rc;		/* Rej Code: 8-bit */
		u8 __reserved[3];
		__be32 fqid;	/* 24-bit */
		__be32 tag;
		struct qm_fd fd;
		u8 __reserved1[32];
	} __packed __aligned(64) ern;
	struct {
		u8 verb;
		u8 fqs;		/* Frame Queue Status */
		u8 __reserved1[6];
		__be32 fqid;	/* 24-bit */
		__be32 context_b;
		u8 __reserved2[48];
	} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
};

#define QM_MR_VERB_VBIT			0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK		0x27
#define QM_MR_VERB_DC_ERN		0x20
#define QM_MR_VERB_FQRN			0x21
#define QM_MR_VERB_FQRNI		0x22
#define QM_MR_VERB_FQRL			0x23
#define QM_MR_VERB_FQPN			0x24
#define QM_MR_RC_MASK			0xf0	/* contains one of; */
#define QM_MR_RC_CGR_TAILDROP		0x00
#define QM_MR_RC_WRED			0x10
#define QM_MR_RC_ERROR			0x20
#define QM_MR_RC_ORPWINDOW_EARLY	0x30
#define QM_MR_RC_ORPWINDOW_LATE		0x40
#define QM_MR_RC_FQ_TAILDROP		0x50
#define QM_MR_RC_ORPWINDOW_RETIRED	0x60
#define QM_MR_RC_ORP_ZERO		0x70
#define QM_MR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */

/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct qm_fqd_stashing {
	/* See QM_STASHING_EXCL_<...> */
	u8 exclusive;
	/* Numbers of cachelines */
	u8 cl;	/* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
};

struct qm_fqd_oac {
	/* "Overhead Accounting Control", see QM_OAC_<...> */
	u8 oac;	/* oac[6-7], _res[0-5] */
	/* Two's-complement value (-128 to +127) */
	s8 oal;	/* "Overhead Accounting Length" */
};

struct qm_fqd {
	/* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
	u8 orpc;
	u8 cgid;
	__be16 fq_ctrl;		/* See QM_FQCTRL_<...> */
	__be16 dest_wq;		/* channel[3-15], wq[0-2] */
	__be16 ics_cred;	/* 15-bit */
	/*
	 * For "Initialize Frame Queue" commands, the write-enable mask
	 * determines whether 'td' or 'oac_init' is observed. For query
	 * commands, this field is always 'td', and 'oac_query' (below) reflects
	 * the Overhead ACcounting values.
	 */
	union {
		__be16 td;	/* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
		struct qm_fqd_oac oac_init;
	};
	__be32 context_b;
	union {
		/* Treat it as 64-bit opaque */
		__be64 opaque;
		struct {
			__be32 hi;
			__be32 lo;
		};
		/* Treat it as s/w portal stashing config */
		/* see "FQD Context_A field used for [...]" */
		struct {
			struct qm_fqd_stashing stashing;
			/*
			 * 48-bit address of FQ context to
			 * stash, must be cacheline-aligned
			 */
			__be16 context_hi;
			__be32 context_lo;
		} __packed;
	} context_a;
	struct qm_fqd_oac oac_query;
} __packed;

#define QM_FQD_CHAN_OFF		3
#define QM_FQD_WQ_MASK		GENMASK(2, 0)
#define QM_FQD_TD_EXP_MASK	GENMASK(4, 0)
#define QM_FQD_TD_MANT_OFF	5
#define QM_FQD_TD_MANT_MASK	GENMASK(12, 5)
#define QM_FQD_TD_MAX		0xe0000000
#define QM_FQD_TD_MANT_MAX	0xff
#define QM_FQD_OAC_OFF		6
#define QM_FQD_AS_OFF		4
#define QM_FQD_DS_OFF		2
#define QM_FQD_XS_MASK		0x3

/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
	return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
	return qm_fqd_stashing_get64(fqd);
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
	fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
	fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
	fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
}

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
				      int roundup)
{
	u32 e = 0;
	int td, oddbit = 0;

	if (val > QM_FQD_TD_MAX)
		return -ERANGE;

	while (val > QM_FQD_TD_MANT_MAX) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}

	td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
	td |= (e & QM_FQD_TD_EXP_MASK);
	fqd->td = cpu_to_be16(td);
	return 0;
}

/* and the other direction */
static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
{
	int td = be16_to_cpu(fqd->td);

	return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
		<< (td & QM_FQD_TD_EXP_MASK);
}
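
/*
 * Worked example of the mant+exp encoding above (values illustrative only):
 * requesting a taildrop threshold of 0x5000 (20480) gives mant = 0xa0 and
 * exp = 7, since 0xa0 << 7 == 0x5000; qm_fqd_get_taildrop() then recovers
 * exactly 20480 from that encoding. Values that are not exactly
 * representable are rounded down, or up if 'roundup' is non-zero.
 *
 *	qm_fqd_set_taildrop(&fqd, 0x5000, 1);	// encodes mant=0xa0, exp=7
 */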

static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
{
	struct qm_fqd_stashing *st = &fqd->context_a.stashing;

	st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
		 ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
		 (cs & QM_FQD_XS_MASK);
}

static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
{
	return fqd->context_a.stashing.cl;
}

static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
{
	fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
}

static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
{
	fqd->oac_init.oal = val;
}

static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
{
	fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
				   (wq & QM_FQD_WQ_MASK));
}

static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
{
	return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
}

static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
{
	return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
}

/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK		0x07ff	/* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE		0x0400	/* Congestion Group Enable */
#define QM_FQCTRL_TDE		0x0200	/* Tail-Drop Enable */
#define QM_FQCTRL_CTXASTASHING	0x0080	/* Context-A stashing */
#define QM_FQCTRL_CPCSTASH	0x0040	/* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR	0x0008	/* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK	0x0004	/* Don't block active */
#define QM_FQCTRL_HOLDACTIVE	0x0002	/* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE	0x0001	/* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE	QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...] */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION	0x04
#define QM_STASHING_EXCL_DATA		0x02
#define QM_STASHING_EXCL_CTX		0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS	0x2	/* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG	0x1	/* Accounting for Congestion Groups */

/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *   MaxP = 4 * (Pn + 1)
 */
struct qm_cgr_wr_parm {
	/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
	__be32 word;
};
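
/*
 * Worked example for the formulas above (numbers illustrative only):
 * MA = 0x40 with Mn = 4 gives MaxTH = 0x40 * 2^4 = 1024, and Pn = 7 gives
 * MaxP = 4 * (7 + 1) = 32.
 */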

/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct qm_cgr_cs_thres {
	/* _res[13-15], TA[5-12], Tn[0-4] */
	__be16 word;
};

/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
struct __qm_mc_cgr {
	struct qm_cgr_wr_parm wr_parm_g;
	struct qm_cgr_wr_parm wr_parm_y;
	struct qm_cgr_wr_parm wr_parm_r;
	u8 wr_en_g;	/* boolean, use QM_CGR_EN */
	u8 wr_en_y;	/* boolean, use QM_CGR_EN */
	u8 wr_en_r;	/* boolean, use QM_CGR_EN */
	u8 cscn_en;	/* boolean, use QM_CGR_EN */
	union {
		struct {
			__be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
			__be16 cscn_targ_dcp_low;
		};
		__be32 cscn_targ;	/* use QM_CGR_TARG_* */
	};
	u8 cstd_en;	/* boolean, use QM_CGR_EN */
	u8 cs;		/* boolean, only used in query response */
	struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
	u8 mode;	/* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __packed;

#define QM_CGR_EN			0x01	/* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000	/* value written to portal bit*/
#define QM_CGR_TARG_UDP_CTRL_DCP	0x4000	/* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n))	/* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0	0x00200000	/* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1	0x00100000	/*			: fman1 */

/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
	int thres = be16_to_cpu(th->word);

	return ((thres >> 5) & 0xff) << (thres & 0x1f);
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
					int roundup)
{
	u32 e = 0;
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}

	th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
	return 0;
}
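
/*
 * Worked example for the conversion above (value illustrative only):
 * requesting a threshold of 12288 encodes TA = 0xc0 and Tn = 6, i.e.
 * 0xc0 * 2^6 = 12288, which is exactly what qm_cgr_cs_thres_get64()
 * recovers.
 *
 *	qm_cgr_cs_thres_set64(&th, 12288, 1);	// TA=0xc0, Tn=6
 */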

/* "Initialize FQ" */
struct qm_mcc_initfq {
	u8 __reserved1[2];
	__be16 we_mask;		/* Write Enable Mask */
	__be32 fqid;		/* 24-bit */
	__be16 count;		/* Initialises 'count+1' FQDs */
	struct qm_fqd fqd;	/* the FQD fields go here */
	u8 __reserved2[30];
} __packed;

/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
	u8 __reserve1[2];
	__be16 we_mask;		/* Write Enable Mask */
	struct __qm_mc_cgr cgr;	/* CGR fields */
	u8 __reserved2[2];
	u8 cgid;
	u8 __reserved3[32];
} __packed;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK	0x01ff	/* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC	0x0100
#define QM_INITFQ_WE_ORPC	0x0080
#define QM_INITFQ_WE_CGID	0x0040
#define QM_INITFQ_WE_FQCTRL	0x0020
#define QM_INITFQ_WE_DESTWQ	0x0010
#define QM_INITFQ_WE_ICSCRED	0x0008
#define QM_INITFQ_WE_TDTHRESH	0x0004
#define QM_INITFQ_WE_CONTEXTB	0x0002
#define QM_INITFQ_WE_CONTEXTA	0x0001

/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK		0x07ff	/* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G	0x0400
#define QM_CGR_WE_WR_PARM_Y	0x0200
#define QM_CGR_WE_WR_PARM_R	0x0100
#define QM_CGR_WE_WR_EN_G	0x0080
#define QM_CGR_WE_WR_EN_Y	0x0040
#define QM_CGR_WE_WR_EN_R	0x0020
#define QM_CGR_WE_CSCN_EN	0x0010
#define QM_CGR_WE_CSCN_TARG	0x0008
#define QM_CGR_WE_CSTD_EN	0x0004
#define QM_CGR_WE_CS_THRES	0x0002
#define QM_CGR_WE_MODE		0x0001

#define QMAN_CGR_FLAG_USE_INIT	0x00000001
#define QMAN_CGR_MODE_FRAME	0x00000001

/* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group, it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
	/* DQRR entry can be consumed */
	qman_cb_dqrr_consume,
	/* Like _consume, but requests parking - FQ must be held-active */
	qman_cb_dqrr_park,
	/* Does not consume, for DCA mode only. */
	qman_cb_dqrr_defer,
	/*
	 * Stop processing without consuming this ring entry. Exits the current
	 * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
	 * an interrupt handler, the callback would typically call
	 * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
	 * otherwise the interrupt will reassert immediately.
	 */
	qman_cb_dqrr_stop,
	/* Like qman_cb_dqrr_stop, but consumes the current entry. */
	qman_cb_dqrr_consume_stop
};

typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
						 struct qman_fq *fq,
						 const struct qm_dqrr_entry *dqrr,
						 bool sched_napi);
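
/*
 * A minimal sketch of a dequeue callback matching the type above (the
 * callback name and the frame-handling helper are hypothetical):
 *
 *	static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *qm,
 *						   struct qman_fq *fq,
 *						   const struct qm_dqrr_entry *dq,
 *						   bool sched_napi)
 *	{
 *		if (dq->stat & QM_DQRR_STAT_FD_VALID)
 *			handle_frame(fq, &dq->fd);	// caller-defined
 *		return qman_cb_dqrr_consume;
 *	}
 */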

/*
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
 */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg);

/*
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will not
 * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
 * "parked".
 */
enum qman_fq_state {
	qman_fq_state_oos,
	qman_fq_state_parked,
	qman_fq_state_sched,
	qman_fq_state_retired
};

#define QMAN_FQ_STATE_CHANGING	0x80000000	/* 'state' is changing */
#define QMAN_FQ_STATE_NE	0x40000000	/* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL	0x20000000	/* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS	0xe0000000	/* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN	0x10000000	/* CGR enabled */
#define QMAN_FQ_STATE_VDQCR	0x08000000	/* being volatile dequeued */

/*
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * they should;
 *
 * (a) extend the qman_fq structure with their state; eg.
 *
 *     // myfq is allocated and driver_fq callbacks filled in;
 *     struct my_fq {
 *	   struct qman_fq base;
 *	   int an_extra_field;
 *	   [ ... add other fields to be associated with each FQ ...]
 *     } *myfq = some_my_fq_allocator();
 *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *     // in a dequeue callback, access extra fields from 'fq' via a cast;
 *     struct my_fq *myfq = (struct my_fq *)fq;
 *     do_something_with(myfq->an_extra_field);
 *     [...]
 *
 * (b) when and if configuring the FQ for context stashing, specify how ever
 *     many cachelines are required to stash 'struct my_fq', to accelerate not
 *     only the QMan driver but the callback as well.
 */
struct qman_fq_cb {
	qman_cb_dqrr dqrr;	/* for dequeued frames */
	qman_cb_mr ern;		/* for s/w ERNs */
	qman_cb_mr fqs;		/* frame-queue state changes */
};

struct qman_fq {
	/* Caller of qman_create_fq() provides these demux callbacks */
	struct qman_fq_cb cb;
	/*
	 * These are internal to the driver, don't touch. In particular, they
	 * may change, be removed, or extended (so you shouldn't rely on
	 * sizeof(qman_fq) being a constant).
	 */
	u32 fqid, idx;
	unsigned long flags;
	enum qman_fq_state state;
	int cgr_groupid;
};

/*
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
 */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
			    struct qman_cgr *cgr, int congested);

struct qman_cgr {
	/* Set these prior to qman_create_cgr() */
	u32 cgrid;	/* 0..255, but u32 to allow specials like -1, 256, etc.*/
	qman_cb_cgr cb;
	/* These are private to the driver */
	u16 chan;	/* portal channel this object is created on */
	struct list_head node;
};

/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE		0x00000001	/* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY		0x00000002	/* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL	0x00000004	/* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_DYNAMIC_FQID	0x00000020	/* (de)allocate fqid */

/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED		0x00000001	/* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL		0x00000004	/* set dest portal */

/*
 * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
 */
#define QM_VDQCR_PRECEDENCE_VDQCR	0x0
#define QM_VDQCR_PRECEDENCE_SDQCR	0x80000000
#define QM_VDQCR_EXACT			0x40000000
#define QM_VDQCR_NUMFRAMES_MASK		0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n)	(((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n)	(((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY	QM_VDQCR_NUMFRAMES_SET(0)

#define QMAN_VOLATILE_FLAG_WAIT		0x00000001	/* wait if VDQCR is in use */
#define QMAN_VOLATILE_FLAG_WAIT_INT	0x00000002	/* if wait, interruptible? */
#define QMAN_VOLATILE_FLAG_FINISH	0x00000004	/* wait till VDQCR completes */

/* "Query FQ Non-Programmable Fields" */
struct qm_mcr_queryfq_np {
	u8 verb;
	u8 result;
	u8 __reserved1;
	u8 state;		/* QM_MCR_NP_STATE_*** */
	u32 fqd_link;		/* 24-bit, _res2[24-31] */
	u16 odp_seq;		/* 14-bit, _res3[14-15] */
	u16 orp_nesn;		/* 14-bit, _res4[14-15] */
	u16 orp_ea_hseq;	/* 15-bit, _res5[15] */
	u16 orp_ea_tseq;	/* 15-bit, _res6[15] */
	u32 orp_ea_hptr;	/* 24-bit, _res7[24-31] */
	u32 orp_ea_tptr;	/* 24-bit, _res8[24-31] */
	u32 pfdr_hptr;		/* 24-bit, _res9[24-31] */
	u32 pfdr_tptr;		/* 24-bit, _res10[24-31] */
	u8 __reserved2[5];
	u8 is;			/* 1-bit, _res12[1-7] */
	u16 ics_surp;
	u32 byte_cnt;
	u32 frm_cnt;		/* 24-bit, _res13[24-31] */
	u32 __reserved3;
	u16 ra1_sfdr;		/* QM_MCR_NP_RA1_*** */
	u16 ra2_sfdr;		/* QM_MCR_NP_RA2_*** */
	u16 __reserved4;
	u16 od1_sfdr;		/* QM_MCR_NP_OD1_*** */
	u16 od2_sfdr;		/* QM_MCR_NP_OD2_*** */
	u16 od3_sfdr;		/* QM_MCR_NP_OD3_*** */
} __packed;

#define QM_MCR_NP_STATE_FE		0x10
#define QM_MCR_NP_STATE_R		0x08
#define QM_MCR_NP_STATE_MASK		0x07	/* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS		0x00
#define QM_MCR_NP_STATE_RETIRED		0x01
#define QM_MCR_NP_STATE_TEN_SCHED	0x02
#define QM_MCR_NP_STATE_TRU_SCHED	0x03
#define QM_MCR_NP_STATE_PARKED		0x04
#define QM_MCR_NP_STATE_ACTIVE		0x05
#define QM_MCR_NP_PTR_MASK		0x07ff	/* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v)		(((v) >> 14) & 0x3)	/* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v)		(((v) >> 14) & 0x1)	/* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v)		(((v) >> 14) & 0x3)	/* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v)		(((v) >> 14) & 0x3)	/* FQD::NPC */

enum qm_mcr_queryfq_np_masks {
	qm_mcr_fqd_link_mask = BIT(24) - 1,
	qm_mcr_odp_seq_mask = BIT(14) - 1,
	qm_mcr_orp_nesn_mask = BIT(14) - 1,
	qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
	qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
	qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
	qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
	qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
	qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
	qm_mcr_is_mask = BIT(1) - 1,
	qm_mcr_frm_cnt_mask = BIT(24) - 1,
};

#define qm_mcr_np_get(np, field) \
	((np)->field & (qm_mcr_##field##_mask))

/* Portal Management */
/**
 * qman_p_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via qman_poll_***() functions).
 */
void qman_p_irqsource_add(struct qman_portal *p, u32 bits);

/**
 * qman_p_irqsource_remove - remove processing sources from being int-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via qman_poll_***() functions.
 */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);

/**
 * qman_affine_cpus - return a mask of cpus that have affine portals
 */
const cpumask_t *qman_affine_cpus(void);

/**
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 *
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not a
 * member of the mask returned from qman_affine_cpus().
 */
u16 qman_affine_channel(int cpu);

/**
 * qman_get_affine_portal - return the portal pointer affine to cpu
 * @cpu: the cpu whose affine portal is the subject of the query
 */
struct qman_portal *qman_get_affine_portal(int cpu);

/**
 * qman_start_using_portal - register a device link for the portal user
 * @p: the portal that will be in use
 * @dev: the device that will use the portal
 *
 * Makes sure that the devices that use the portal are unbound when the
 * portal is unbound
 */
int qman_start_using_portal(struct qman_portal *p, struct device *dev);

/**
 * qman_p_poll_dqrr - process DQRR (fast-path) entries
 * @limit: the maximum number of DQRR entries to process
 *
 * Use of this function requires that DQRR processing not be interrupt-driven.
 * The return value represents the number of DQRR entries processed.
 */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);

/**
 * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
 * access to.
 */
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
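
/*
 * A minimal usage sketch (the pool-channel number is illustrative): make the
 * portal affine to the current cpu dequeue from pool channel 3 as well.
 *
 *	struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
 *
 *	qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL(3));
 */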

/* FQ management */
/**
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 *
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended to
 * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with, it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any context_b modifications requested in the
 * qman_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
 * software portals, the context_b field is controlled by the driver and can't
 * be modified by the caller.
 */
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);

/**
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 *
 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
 * not deallocated but the caller regains ownership, to do with as desired. The
 * FQ must be in the 'out-of-service' or in the 'parked' state.
 */
void qman_destroy_fq(struct qman_fq *fq);

/**
 * qman_fq_fqid - Queries the frame queue ID of a FQ object
 * @fq: the frame queue object to query
 */
u32 qman_fq_fqid(struct qman_fq *fq);

/**
 * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
 * @fq: the frame queue object to modify, must be 'parked' or new.
 * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
 * @opts: the FQ-modification settings, as defined in the low-level API
 *
 * The @opts parameter comes from the low-level portal API. Select
 * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
 * rather than parked. NB, @opts can be NULL.
 *
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver;
 * 1. the 'count' and 'fqid' fields are always ignored (this operation only
 * affects one frame queue: @fq).
 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
 * 'fqd' structure's 'context_b' field are sometimes overwritten;
 *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
 *     initialised to a value used by the driver for demux.
 *   - if context_b is initialised for demux, so is context_a in case stashing
 *     is requested (see item 4).
 * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
 * objects.)
 * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
 * 'dest::channel' field will be overwritten to match the portal used to issue
 * the command. If the WE_DESTWQ write-enable bit had already been set by the
 * caller, the channel workqueue will be left as-is, otherwise the write-enable
 * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
 * isn't set, the destination channel/workqueue fields and the write-enable bit
 * are left as-is.
 * 4. if the driver overwrites context_a/b for demux, then if
 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
 * context_a.address fields and will leave the stashing fields provided by the
 * user alone, otherwise it will zero out the context_a.stashing fields.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
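
/*
 * A minimal sketch of creating and scheduling a frame queue (the destination
 * channel/work-queue and error handling are illustrative; 'fq' is assumed to
 * already have its cb.dqrr etc. filled in):
 *
 *	struct qm_mcc_initfq opts;
 *
 *	if (qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq))
 *		return -EIO;
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL);
 *	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
 *	qm_fqd_set_destwq(&opts.fqd, qm_channel_pool1, 3);
 *	if (qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts))
 *		return -EIO;
 */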

/**
 * qman_schedule_fq - Schedules a FQ
 * @fq: the frame queue object to schedule, must be 'parked'
 *
 * Schedules the frame queue, which must be Parked, which takes it to
 * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
 */
int qman_schedule_fq(struct qman_fq *fq);

/**
 * qman_retire_fq - Retires a FQ
 * @fq: the frame queue object to retire
 * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
 *
 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
 * the retirement was started asynchronously, otherwise it returns negative for
 * failure. When this function returns zero, @flags is set to indicate whether
 * the retired FQ is empty and/or whether it has any ORL fragments (to show up
 * as ERNs). Otherwise the corresponding flags will be known when a subsequent
 * FQRN message shows up on the portal's message ring.
 *
 * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
 * Active state), the completion will be via the message ring as a FQRN - but
 * the corresponding callback may occur before this function returns!! Ie. the
 * caller should be prepared to accept the callback as the function is called,
 * not only once it has returned.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags);

/**
 * qman_oos_fq - Puts a FQ "out of service"
 * @fq: the frame queue object to be put out-of-service, must be 'retired'
 *
 * The frame queue must be retired and empty, and if any order restoration list
 * was released as ERNs at the time of retirement, they must all be consumed.
 */
int qman_oos_fq(struct qman_fq *fq);

/*
 * qman_volatile_dequeue - Issue a volatile dequeue command
 * @fq: the frame queue object to dequeue from
 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
 * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
 *
 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
 * the VDQCR is already in use, otherwise returns non-zero for failure. If
 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
 * the VDQCR command has finished executing (ie. once the callback for the last
 * DQRR entry resulting from the VDQCR command has been called). If not using
 * the FINISH flag, completion can be determined either by detecting the
 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
 * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
 * for the QMAN_FQ_STATE_VDQCR bit to disappear.
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
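
/*
 * A minimal sketch (the frame count is illustrative): pull up to 16 frames
 * from a parked/retired FQ and only return once the command has completed.
 *
 *	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(16);
 *	int err = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
 *					QMAN_VOLATILE_FLAG_FINISH, vdqcr);
 */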

/**
 * qman_enqueue - Enqueue a frame to a frame queue
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 *
 * Fills an entry in the portal's EQCR to enqueue the frame described by @fd.
 * The descriptor details are copied from @fd to the EQCR entry, the 'pid'
 * field is ignored. The return value is non-zero on error, such as ring full.
 */
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
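
/*
 * A minimal sketch of enqueuing with a bounded retry when the ring is full
 * (the retry count is arbitrary; 'fd' is assumed to be a populated
 * struct qm_fd):
 *
 *	int i, err;
 *
 *	for (i = 0; i < 100; i++) {
 *		err = qman_enqueue(fq, &fd);
 *		if (!err)
 *			break;
 *		cpu_relax();
 *	}
 */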

/**
 * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
 * @result: is set by the API to the base FQID of the allocated range
 * @count: the number of FQIDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_fqid_range(u32 *result, u32 count);
#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)

/**
 * qman_release_fqid - Release the specified frame queue ID
 * @fqid: the FQID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * FQID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_fqid(u32 fqid);

/**
 * qman_query_fq_np - Queries non-programmable FQD fields
 * @fq: the frame queue object to be queried
 * @np: storage for the queried FQD fields
 */
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
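
/*
 * A minimal sketch: read the instantaneous frame and byte counts of a FQ via
 * the non-programmable fields query.
 *
 *	struct qm_mcr_queryfq_np np;
 *
 *	if (!qman_query_fq_np(fq, &np))
 *		pr_info("frames=%u bytes=%u\n",
 *			qm_mcr_np_get(&np, frm_cnt), np.byte_cnt);
 */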

/* Pool-channel management */
/**
 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
 * @result: is set by the API to the base pool-channel ID of the allocated range
 * @count: the number of pool-channel IDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_pool_range(u32 *result, u32 count);
#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)

/**
 * qman_release_pool - Release the specified pool-channel ID
 * @id: the pool-chan ID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * pool-channel ID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_pool(u32 id);
  1032. /**
  1033. * qman_create_cgr - Register a congestion group object
  1034. * @cgr: the 'cgr' object, with fields filled in
  1035. * @flags: QMAN_CGR_FLAG_* values
  1036. * @opts: optional state of CGR settings
  1037. *
  1038. * Registers this object to receiving congestion entry/exit callbacks on the
  1039. * portal affine to the cpu portal on which this API is executed. If opts is
  1040. * NULL then only the callback (cgr->cb) function is registered. If @flags
  1041. * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
  1042. * any unspecified parameters) will be used rather than a modify hw hardware
  1043. * (which only modifies the specified parameters).
  1044. */
  1045. int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
  1046. struct qm_mcc_initcgr *opts);
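
/*
 * A minimal sketch of registering a CGR with congestion-state notifications
 * and an illustrative 64 KiB congestion threshold ('cgr' is assumed to
 * already have cgrid and cb set; 'my_cscn_cb' is a hypothetical qman_cb_cgr):
 *
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES | QM_CGR_WE_CSCN_EN);
 *	opts.cgr.cscn_en = QM_CGR_EN;
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 0x10000, 1);
 *	cgr->cb = my_cscn_cb;
 *	int err = qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */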

/**
 * qman_delete_cgr - Deregisters a congestion group object
 * @cgr: the 'cgr' object to deregister
 *
 * "Unplugs" this CGR object from the portal affine to the cpu on which this API
 * is executed. This must be executed on the same affine portal on which it was
 * created.
 */
int qman_delete_cgr(struct qman_cgr *cgr);

/**
 * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
 * @cgr: the 'cgr' object to deregister
 *
 * This will select the proper CPU and run qman_delete_cgr() there.
 */
void qman_delete_cgr_safe(struct qman_cgr *cgr);

/**
 * qman_update_cgr_safe - Modifies a congestion group object from any CPU
 * @cgr: the 'cgr' object to modify
 * @opts: state of the CGR settings
 *
 * This will select the proper CPU and modify the CGR settings.
 */
int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts);

/**
 * qman_query_cgr_congested - Queries CGR's congestion status
 * @cgr: the 'cgr' object to query
 * @result: returns 'cgr's congestion status, 1 (true) if congested
 */
int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);

/**
 * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
 * @result: is set by the API to the base CGR ID of the allocated range
 * @count: the number of CGR IDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_cgrid_range(u32 *result, u32 count);
#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)

/**
 * qman_release_cgrid - Release the specified CGR ID
 * @id: the CGR ID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * CGR ID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_cgrid(u32 id);

/**
 * qman_is_probed - Check if qman is probed
 *
 * Returns 1 if the qman driver successfully probed, -1 if the qman driver
 * failed to probe or 0 if the qman driver has not probed yet.
 */
int qman_is_probed(void);

/**
 * qman_portals_probed - Check if all cpu bound qman portals are probed
 *
 * Returns 1 if all the required cpu bound qman portals successfully probed,
 * -1 if probe errors appeared or 0 if the qman portals have not yet finished
 * probing.
 */
int qman_portals_probed(void);

/**
 * qman_dqrr_get_ithresh - Get coalesce interrupt threshold
 * @portal: portal to get the value for
 * @ithresh: threshold pointer
 */
void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh);

/**
 * qman_dqrr_set_ithresh - Set coalesce interrupt threshold
 * @portal: portal to set the new value on
 * @ithresh: new threshold value
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);

/**
 * qman_portal_get_iperiod - Get coalesce interrupt period
 * @portal: portal to get the value for
 * @iperiod: period pointer
 */
void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod);

/**
 * qman_portal_set_iperiod - Set coalesce interrupt period
 * @portal: portal to set the new value on
 * @iperiod: new period value
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);

#endif	/* __FSL_QMAN_H */