  1. /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
  2. /*
  3. * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  4. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
  5. */
  6. #ifndef RXE_HDR_H
  7. #define RXE_HDR_H
  8. /* extracted information about a packet carried in an sk_buff struct fits in
  9. * the skbuff cb array. Must be at most 48 bytes. stored in control block of
  10. * sk_buff for received packets.
  11. */
/*
 * Per-packet state; for received packets it lives in skb->cb, so it
 * must fit in the 48-byte control block (checked in SKB_TO_PKT()).
 */
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns packet */
	struct rxe_qp		*qp;		/* qp that owns packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* points to bth */
	u32			mask;		/* useful info about pkt */
	u32			psn;		/* bth psn of packet */
	u16			pkey_index;	/* partition of pkt */
	u16			paylen;		/* length of bth - icrc */
	u8			port_num;	/* port pkt received on */
	u8			opcode;		/* bth opcode of packet */
};
  24. /* Macros should be used only for received skb */
/*
 * Return the rxe_pkt_info stored in skb->cb.  Only valid for received
 * skbs, where rxe fills the control block with a struct rxe_pkt_info.
 */
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
	/* compile-time proof that the pkt info fits in skb->cb */
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
	return (void *)skb->cb;
}
/*
 * Inverse of SKB_TO_PKT(): recover the enclosing sk_buff from a
 * rxe_pkt_info pointer.  Only valid when pkt actually points into an
 * skb's cb array (i.e. came from SKB_TO_PKT()).
 */
static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
	return container_of((void *)pkt, struct sk_buff, cb);
}
  34. /*
  35. * IBA header types and methods
  36. *
  37. * Some of these are for reference and completeness only since
  38. * rxe does not currently support RD transport
  39. * most of this could be moved into IB core. ib_pack.h has
  40. * part of this but is incomplete
  41. *
  42. * Header specific routines to insert/extract values to/from headers
  43. * the routines that are named __hhh_(set_)fff() take a pointer to a
  44. * hhh header and get(set) the fff field. The routines named
  45. * hhh_(set_)fff take a packet info struct and find the
  46. * header and field based on the opcode in the packet.
  47. * Conversion to/from network byte order from cpu order is also done.
  48. */
#define RXE_ICRC_SIZE		(4)	/* trailing invariant CRC, bytes */
#define RXE_MAX_HDR_LENGTH	(80)	/* worst-case stacked IBA headers */

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8			opcode;
	u8			flags;		/* se | mig | pad | tver */
	__be16			pkey;
	__be32			qpn;		/* fecn | becn | resv6a | qpn */
	__be32			apsn;		/* ack req | resv7 | psn */
};

#define BTH_TVER		(0)
#define BTH_DEF_PKEY		(0xffff)

/* bits within the flags byte */
#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)

/* bits within the qpn word (host byte order) */
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)

/* bits within the apsn word (host byte order) */
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)
  74. static inline u8 __bth_opcode(void *arg)
  75. {
  76. struct rxe_bth *bth = arg;
  77. return bth->opcode;
  78. }
  79. static inline void __bth_set_opcode(void *arg, u8 opcode)
  80. {
  81. struct rxe_bth *bth = arg;
  82. bth->opcode = opcode;
  83. }
  84. static inline u8 __bth_se(void *arg)
  85. {
  86. struct rxe_bth *bth = arg;
  87. return 0 != (BTH_SE_MASK & bth->flags);
  88. }
  89. static inline void __bth_set_se(void *arg, int se)
  90. {
  91. struct rxe_bth *bth = arg;
  92. if (se)
  93. bth->flags |= BTH_SE_MASK;
  94. else
  95. bth->flags &= ~BTH_SE_MASK;
  96. }
  97. static inline u8 __bth_mig(void *arg)
  98. {
  99. struct rxe_bth *bth = arg;
  100. return 0 != (BTH_MIG_MASK & bth->flags);
  101. }
  102. static inline void __bth_set_mig(void *arg, u8 mig)
  103. {
  104. struct rxe_bth *bth = arg;
  105. if (mig)
  106. bth->flags |= BTH_MIG_MASK;
  107. else
  108. bth->flags &= ~BTH_MIG_MASK;
  109. }
  110. static inline u8 __bth_pad(void *arg)
  111. {
  112. struct rxe_bth *bth = arg;
  113. return (BTH_PAD_MASK & bth->flags) >> 4;
  114. }
  115. static inline void __bth_set_pad(void *arg, u8 pad)
  116. {
  117. struct rxe_bth *bth = arg;
  118. bth->flags = (BTH_PAD_MASK & (pad << 4)) |
  119. (~BTH_PAD_MASK & bth->flags);
  120. }
  121. static inline u8 __bth_tver(void *arg)
  122. {
  123. struct rxe_bth *bth = arg;
  124. return BTH_TVER_MASK & bth->flags;
  125. }
  126. static inline void __bth_set_tver(void *arg, u8 tver)
  127. {
  128. struct rxe_bth *bth = arg;
  129. bth->flags = (BTH_TVER_MASK & tver) |
  130. (~BTH_TVER_MASK & bth->flags);
  131. }
  132. static inline u16 __bth_pkey(void *arg)
  133. {
  134. struct rxe_bth *bth = arg;
  135. return be16_to_cpu(bth->pkey);
  136. }
  137. static inline void __bth_set_pkey(void *arg, u16 pkey)
  138. {
  139. struct rxe_bth *bth = arg;
  140. bth->pkey = cpu_to_be16(pkey);
  141. }
  142. static inline u32 __bth_qpn(void *arg)
  143. {
  144. struct rxe_bth *bth = arg;
  145. return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
  146. }
  147. static inline void __bth_set_qpn(void *arg, u32 qpn)
  148. {
  149. struct rxe_bth *bth = arg;
  150. u32 resvqpn = be32_to_cpu(bth->qpn);
  151. bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
  152. (~BTH_QPN_MASK & resvqpn));
  153. }
  154. static inline int __bth_fecn(void *arg)
  155. {
  156. struct rxe_bth *bth = arg;
  157. return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
  158. }
  159. static inline void __bth_set_fecn(void *arg, int fecn)
  160. {
  161. struct rxe_bth *bth = arg;
  162. if (fecn)
  163. bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
  164. else
  165. bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
  166. }
  167. static inline int __bth_becn(void *arg)
  168. {
  169. struct rxe_bth *bth = arg;
  170. return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
  171. }
  172. static inline void __bth_set_becn(void *arg, int becn)
  173. {
  174. struct rxe_bth *bth = arg;
  175. if (becn)
  176. bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
  177. else
  178. bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
  179. }
  180. static inline u8 __bth_resv6a(void *arg)
  181. {
  182. struct rxe_bth *bth = arg;
  183. return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
  184. }
  185. static inline void __bth_set_resv6a(void *arg)
  186. {
  187. struct rxe_bth *bth = arg;
  188. bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK);
  189. }
  190. static inline int __bth_ack(void *arg)
  191. {
  192. struct rxe_bth *bth = arg;
  193. return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
  194. }
  195. static inline void __bth_set_ack(void *arg, int ack)
  196. {
  197. struct rxe_bth *bth = arg;
  198. if (ack)
  199. bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
  200. else
  201. bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
  202. }
  203. static inline void __bth_set_resv7(void *arg)
  204. {
  205. struct rxe_bth *bth = arg;
  206. bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
  207. }
  208. static inline u32 __bth_psn(void *arg)
  209. {
  210. struct rxe_bth *bth = arg;
  211. return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
  212. }
  213. static inline void __bth_set_psn(void *arg, u32 psn)
  214. {
  215. struct rxe_bth *bth = arg;
  216. u32 apsn = be32_to_cpu(bth->apsn);
  217. bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
  218. (~BTH_PSN_MASK & apsn));
  219. }
  220. static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
  221. {
  222. return __bth_opcode(pkt->hdr);
  223. }
  224. static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
  225. {
  226. __bth_set_opcode(pkt->hdr, opcode);
  227. }
  228. static inline u8 bth_se(struct rxe_pkt_info *pkt)
  229. {
  230. return __bth_se(pkt->hdr);
  231. }
  232. static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
  233. {
  234. __bth_set_se(pkt->hdr, se);
  235. }
  236. static inline u8 bth_mig(struct rxe_pkt_info *pkt)
  237. {
  238. return __bth_mig(pkt->hdr);
  239. }
  240. static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
  241. {
  242. __bth_set_mig(pkt->hdr, mig);
  243. }
  244. static inline u8 bth_pad(struct rxe_pkt_info *pkt)
  245. {
  246. return __bth_pad(pkt->hdr);
  247. }
  248. static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
  249. {
  250. __bth_set_pad(pkt->hdr, pad);
  251. }
  252. static inline u8 bth_tver(struct rxe_pkt_info *pkt)
  253. {
  254. return __bth_tver(pkt->hdr);
  255. }
  256. static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
  257. {
  258. __bth_set_tver(pkt->hdr, tver);
  259. }
  260. static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
  261. {
  262. return __bth_pkey(pkt->hdr);
  263. }
  264. static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
  265. {
  266. __bth_set_pkey(pkt->hdr, pkey);
  267. }
  268. static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
  269. {
  270. return __bth_qpn(pkt->hdr);
  271. }
  272. static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
  273. {
  274. __bth_set_qpn(pkt->hdr, qpn);
  275. }
  276. static inline int bth_fecn(struct rxe_pkt_info *pkt)
  277. {
  278. return __bth_fecn(pkt->hdr);
  279. }
  280. static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
  281. {
  282. __bth_set_fecn(pkt->hdr, fecn);
  283. }
  284. static inline int bth_becn(struct rxe_pkt_info *pkt)
  285. {
  286. return __bth_becn(pkt->hdr);
  287. }
  288. static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
  289. {
  290. __bth_set_becn(pkt->hdr, becn);
  291. }
  292. static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
  293. {
  294. return __bth_resv6a(pkt->hdr);
  295. }
  296. static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
  297. {
  298. __bth_set_resv6a(pkt->hdr);
  299. }
  300. static inline int bth_ack(struct rxe_pkt_info *pkt)
  301. {
  302. return __bth_ack(pkt->hdr);
  303. }
  304. static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
  305. {
  306. __bth_set_ack(pkt->hdr, ack);
  307. }
  308. static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
  309. {
  310. __bth_set_resv7(pkt->hdr);
  311. }
  312. static inline u32 bth_psn(struct rxe_pkt_info *pkt)
  313. {
  314. return __bth_psn(pkt->hdr);
  315. }
  316. static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
  317. {
  318. __bth_set_psn(pkt->hdr, psn);
  319. }
  320. static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
  321. int mig, int pad, u16 pkey, u32 qpn, int ack_req,
  322. u32 psn)
  323. {
  324. struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr);
  325. bth->opcode = opcode;
  326. bth->flags = (pad << 4) & BTH_PAD_MASK;
  327. if (se)
  328. bth->flags |= BTH_SE_MASK;
  329. if (mig)
  330. bth->flags |= BTH_MIG_MASK;
  331. bth->pkey = cpu_to_be16(pkey);
  332. bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
  333. psn &= BTH_PSN_MASK;
  334. if (ack_req)
  335. psn |= BTH_ACK_MASK;
  336. bth->apsn = cpu_to_be32(psn);
  337. }
  338. /******************************************************************************
  339. * Reliable Datagram Extended Transport Header
  340. ******************************************************************************/
/* RD transport only; rxe does not currently support RD (reference) */
struct rxe_rdeth {
	__be32			een;	/* resv8 | end-to-end context number */
};

#define RDETH_EEN_MASK		(0x00ffffff)
  345. static inline u8 __rdeth_een(void *arg)
  346. {
  347. struct rxe_rdeth *rdeth = arg;
  348. return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
  349. }
  350. static inline void __rdeth_set_een(void *arg, u32 een)
  351. {
  352. struct rxe_rdeth *rdeth = arg;
  353. rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
  354. }
  355. static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
  356. {
  357. return __rdeth_een(pkt->hdr +
  358. rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
  359. }
  360. static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
  361. {
  362. __rdeth_set_een(pkt->hdr +
  363. rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
  364. }
  365. /******************************************************************************
  366. * Datagram Extended Transport Header
  367. ******************************************************************************/
struct rxe_deth {
	__be32			qkey;
	__be32			sqp;	/* resv8 | source qp number */
};

#define GSI_QKEY		(0x80010000)	/* well-known QP1 qkey */
#define DETH_SQP_MASK		(0x00ffffff)
  374. static inline u32 __deth_qkey(void *arg)
  375. {
  376. struct rxe_deth *deth = arg;
  377. return be32_to_cpu(deth->qkey);
  378. }
  379. static inline void __deth_set_qkey(void *arg, u32 qkey)
  380. {
  381. struct rxe_deth *deth = arg;
  382. deth->qkey = cpu_to_be32(qkey);
  383. }
  384. static inline u32 __deth_sqp(void *arg)
  385. {
  386. struct rxe_deth *deth = arg;
  387. return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
  388. }
  389. static inline void __deth_set_sqp(void *arg, u32 sqp)
  390. {
  391. struct rxe_deth *deth = arg;
  392. deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
  393. }
  394. static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
  395. {
  396. return __deth_qkey(pkt->hdr +
  397. rxe_opcode[pkt->opcode].offset[RXE_DETH]);
  398. }
  399. static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
  400. {
  401. __deth_set_qkey(pkt->hdr +
  402. rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
  403. }
  404. static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
  405. {
  406. return __deth_sqp(pkt->hdr +
  407. rxe_opcode[pkt->opcode].offset[RXE_DETH]);
  408. }
  409. static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
  410. {
  411. __deth_set_sqp(pkt->hdr +
  412. rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
  413. }
  414. /******************************************************************************
  415. * RDMA Extended Transport Header
  416. ******************************************************************************/
struct rxe_reth {
	__be64			va;	/* remote virtual address */
	__be32			rkey;	/* remote memory key */
	__be32			len;	/* dma length */
};
  422. static inline u64 __reth_va(void *arg)
  423. {
  424. struct rxe_reth *reth = arg;
  425. return be64_to_cpu(reth->va);
  426. }
  427. static inline void __reth_set_va(void *arg, u64 va)
  428. {
  429. struct rxe_reth *reth = arg;
  430. reth->va = cpu_to_be64(va);
  431. }
  432. static inline u32 __reth_rkey(void *arg)
  433. {
  434. struct rxe_reth *reth = arg;
  435. return be32_to_cpu(reth->rkey);
  436. }
  437. static inline void __reth_set_rkey(void *arg, u32 rkey)
  438. {
  439. struct rxe_reth *reth = arg;
  440. reth->rkey = cpu_to_be32(rkey);
  441. }
  442. static inline u32 __reth_len(void *arg)
  443. {
  444. struct rxe_reth *reth = arg;
  445. return be32_to_cpu(reth->len);
  446. }
  447. static inline void __reth_set_len(void *arg, u32 len)
  448. {
  449. struct rxe_reth *reth = arg;
  450. reth->len = cpu_to_be32(len);
  451. }
  452. static inline u64 reth_va(struct rxe_pkt_info *pkt)
  453. {
  454. return __reth_va(pkt->hdr +
  455. rxe_opcode[pkt->opcode].offset[RXE_RETH]);
  456. }
  457. static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
  458. {
  459. __reth_set_va(pkt->hdr +
  460. rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
  461. }
  462. static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
  463. {
  464. return __reth_rkey(pkt->hdr +
  465. rxe_opcode[pkt->opcode].offset[RXE_RETH]);
  466. }
  467. static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
  468. {
  469. __reth_set_rkey(pkt->hdr +
  470. rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
  471. }
  472. static inline u32 reth_len(struct rxe_pkt_info *pkt)
  473. {
  474. return __reth_len(pkt->hdr +
  475. rxe_opcode[pkt->opcode].offset[RXE_RETH]);
  476. }
  477. static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
  478. {
  479. __reth_set_len(pkt->hdr +
  480. rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
  481. }
  482. /******************************************************************************
  483. * Atomic Extended Transport Header
  484. ******************************************************************************/
/*
 * __packed is required: the wire layout puts the 32-bit rkey between
 * two 64-bit fields, so swap_add/comp are not naturally aligned.
 */
struct rxe_atmeth {
	__be64			va;		/* remote virtual address */
	__be32			rkey;		/* remote memory key */
	__be64			swap_add;	/* swap or add operand */
	__be64			comp;		/* compare operand */
} __packed;
  491. static inline u64 __atmeth_va(void *arg)
  492. {
  493. struct rxe_atmeth *atmeth = arg;
  494. return be64_to_cpu(atmeth->va);
  495. }
  496. static inline void __atmeth_set_va(void *arg, u64 va)
  497. {
  498. struct rxe_atmeth *atmeth = arg;
  499. atmeth->va = cpu_to_be64(va);
  500. }
  501. static inline u32 __atmeth_rkey(void *arg)
  502. {
  503. struct rxe_atmeth *atmeth = arg;
  504. return be32_to_cpu(atmeth->rkey);
  505. }
  506. static inline void __atmeth_set_rkey(void *arg, u32 rkey)
  507. {
  508. struct rxe_atmeth *atmeth = arg;
  509. atmeth->rkey = cpu_to_be32(rkey);
  510. }
  511. static inline u64 __atmeth_swap_add(void *arg)
  512. {
  513. struct rxe_atmeth *atmeth = arg;
  514. return be64_to_cpu(atmeth->swap_add);
  515. }
  516. static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
  517. {
  518. struct rxe_atmeth *atmeth = arg;
  519. atmeth->swap_add = cpu_to_be64(swap_add);
  520. }
  521. static inline u64 __atmeth_comp(void *arg)
  522. {
  523. struct rxe_atmeth *atmeth = arg;
  524. return be64_to_cpu(atmeth->comp);
  525. }
  526. static inline void __atmeth_set_comp(void *arg, u64 comp)
  527. {
  528. struct rxe_atmeth *atmeth = arg;
  529. atmeth->comp = cpu_to_be64(comp);
  530. }
  531. static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
  532. {
  533. return __atmeth_va(pkt->hdr +
  534. rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
  535. }
  536. static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
  537. {
  538. __atmeth_set_va(pkt->hdr +
  539. rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
  540. }
  541. static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
  542. {
  543. return __atmeth_rkey(pkt->hdr +
  544. rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
  545. }
  546. static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
  547. {
  548. __atmeth_set_rkey(pkt->hdr +
  549. rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
  550. }
  551. static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
  552. {
  553. return __atmeth_swap_add(pkt->hdr +
  554. rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
  555. }
  556. static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
  557. {
  558. __atmeth_set_swap_add(pkt->hdr +
  559. rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
  560. }
  561. static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
  562. {
  563. return __atmeth_comp(pkt->hdr +
  564. rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
  565. }
  566. static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
  567. {
  568. __atmeth_set_comp(pkt->hdr +
  569. rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
  570. }
  571. /******************************************************************************
  572. * Ack Extended Transport Header
  573. ******************************************************************************/
struct rxe_aeth {
	__be32			smsn;	/* syndrome | message sequence number */
};

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

/*
 * Syndrome byte values: the top three bits select the type
 * (AETH_TYPE_MASK); the remaining bits carry credits / timer / NAK code.
 */
enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,	/* ack with unlimited credits */
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};
  592. static inline u8 __aeth_syn(void *arg)
  593. {
  594. struct rxe_aeth *aeth = arg;
  595. return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
  596. }
  597. static inline void __aeth_set_syn(void *arg, u8 syn)
  598. {
  599. struct rxe_aeth *aeth = arg;
  600. u32 smsn = be32_to_cpu(aeth->smsn);
  601. aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
  602. (~AETH_SYN_MASK & smsn));
  603. }
  604. static inline u32 __aeth_msn(void *arg)
  605. {
  606. struct rxe_aeth *aeth = arg;
  607. return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
  608. }
  609. static inline void __aeth_set_msn(void *arg, u32 msn)
  610. {
  611. struct rxe_aeth *aeth = arg;
  612. u32 smsn = be32_to_cpu(aeth->smsn);
  613. aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
  614. (~AETH_MSN_MASK & smsn));
  615. }
  616. static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
  617. {
  618. return __aeth_syn(pkt->hdr +
  619. rxe_opcode[pkt->opcode].offset[RXE_AETH]);
  620. }
  621. static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
  622. {
  623. __aeth_set_syn(pkt->hdr +
  624. rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
  625. }
  626. static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
  627. {
  628. return __aeth_msn(pkt->hdr +
  629. rxe_opcode[pkt->opcode].offset[RXE_AETH]);
  630. }
  631. static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
  632. {
  633. __aeth_set_msn(pkt->hdr +
  634. rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
  635. }
  636. /******************************************************************************
  637. * Atomic Ack Extended Transport Header
  638. ******************************************************************************/
struct rxe_atmack {
	__be64			orig;	/* original value at the target */
};
  642. static inline u64 __atmack_orig(void *arg)
  643. {
  644. struct rxe_atmack *atmack = arg;
  645. return be64_to_cpu(atmack->orig);
  646. }
  647. static inline void __atmack_set_orig(void *arg, u64 orig)
  648. {
  649. struct rxe_atmack *atmack = arg;
  650. atmack->orig = cpu_to_be64(orig);
  651. }
  652. static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
  653. {
  654. return __atmack_orig(pkt->hdr +
  655. rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
  656. }
  657. static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
  658. {
  659. __atmack_set_orig(pkt->hdr +
  660. rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
  661. }
  662. /******************************************************************************
  663. * Immediate Extended Transport Header
  664. ******************************************************************************/
struct rxe_immdt {
	__be32			imm;	/* opaque; kept in network order */
};
  668. static inline __be32 __immdt_imm(void *arg)
  669. {
  670. struct rxe_immdt *immdt = arg;
  671. return immdt->imm;
  672. }
  673. static inline void __immdt_set_imm(void *arg, __be32 imm)
  674. {
  675. struct rxe_immdt *immdt = arg;
  676. immdt->imm = imm;
  677. }
  678. static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
  679. {
  680. return __immdt_imm(pkt->hdr +
  681. rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
  682. }
  683. static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
  684. {
  685. __immdt_set_imm(pkt->hdr +
  686. rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
  687. }
  688. /******************************************************************************
  689. * Invalidate Extended Transport Header
  690. ******************************************************************************/
struct rxe_ieth {
	__be32			rkey;	/* rkey to invalidate */
};
  694. static inline u32 __ieth_rkey(void *arg)
  695. {
  696. struct rxe_ieth *ieth = arg;
  697. return be32_to_cpu(ieth->rkey);
  698. }
  699. static inline void __ieth_set_rkey(void *arg, u32 rkey)
  700. {
  701. struct rxe_ieth *ieth = arg;
  702. ieth->rkey = cpu_to_be32(rkey);
  703. }
  704. static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
  705. {
  706. return __ieth_rkey(pkt->hdr +
  707. rxe_opcode[pkt->opcode].offset[RXE_IETH]);
  708. }
  709. static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
  710. {
  711. __ieth_set_rkey(pkt->hdr +
  712. rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
  713. }
/* on-the-wire sizes of each header type, derived from the structs above */
enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};
/* total length of the IBA headers for this opcode (bth + extensions) */
static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return rxe_opcode[pkt->opcode].length;
}

/* first byte of the payload, immediately after the headers */
static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

/*
 * Payload length: pkt->paylen covers bth through icrc, so subtract the
 * headers, the pad bytes and the trailing ICRC.
 */
static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}
  738. #endif /* RXE_HDR_H */