svc_rdma_pcl.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2020, Oracle and/or its affiliates
 */

#ifndef SVC_RDMA_PCL_H
#define SVC_RDMA_PCL_H

#include <linux/list.h>
#include <linux/sunrpc/xdr.h>
#include <linux/types.h>
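
/*
 * An RDMA segment as defined in RFC 8166: a steering tag (handle),
 * a byte count, and a remote offset.
 */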
struct svc_rdma_segment {
	u32			rs_handle;
	u32			rs_length;
	u64			rs_offset;
};
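
/*
 * One chunk parsed from a transport header. The chunk's segments
 * follow in the @ch_segcount-element flexible array. @ch_position
 * is the chunk's offset in the RPC message's XDR stream, @ch_length
 * the sum of the segment lengths, and @ch_payload_length the number
 * of payload bytes actually conveyed by the chunk.
 */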
struct svc_rdma_chunk {
	struct list_head	ch_list;

	u32			ch_position;
	u32			ch_length;
	u32			ch_payload_length;

	u32			ch_segcount;
	struct svc_rdma_segment	ch_segments[];
};
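
/*
 * A parsed chunk list: @cl_count chunks linked on @cl_chunks.
 */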
struct svc_rdma_pcl {
	unsigned int		cl_count;
	struct list_head	cl_chunks;
};

/**
 * pcl_init - Initialize a parsed chunk list
 * @pcl: parsed chunk list to initialize
 */
static inline void pcl_init(struct svc_rdma_pcl *pcl)
{
	INIT_LIST_HEAD(&pcl->cl_chunks);
}

/**
 * pcl_is_empty - Return true if parsed chunk list is empty
 * @pcl: parsed chunk list
 */
static inline bool pcl_is_empty(const struct svc_rdma_pcl *pcl)
{
	return list_empty(&pcl->cl_chunks);
}

/**
 * pcl_first_chunk - Return first chunk in a parsed chunk list
 * @pcl: parsed chunk list
 *
 * Returns the first chunk in the list, or NULL if the list is empty.
 */
static inline struct svc_rdma_chunk *
pcl_first_chunk(const struct svc_rdma_pcl *pcl)
{
	if (pcl_is_empty(pcl))
		return NULL;
	return list_first_entry(&pcl->cl_chunks, struct svc_rdma_chunk,
				ch_list);
}

/**
 * pcl_next_chunk - Return next chunk in a parsed chunk list
 * @pcl: a parsed chunk list
 * @chunk: chunk in @pcl
 *
 * Returns the next chunk in the list, or NULL if @chunk is already last.
 */
static inline struct svc_rdma_chunk *
pcl_next_chunk(const struct svc_rdma_pcl *pcl, struct svc_rdma_chunk *chunk)
{
	if (list_is_last(&chunk->ch_list, &pcl->cl_chunks))
		return NULL;
	return list_next_entry(chunk, ch_list);
}
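
/*
 * Example (a sketch; "pcl" and handle_chunk() are hypothetical):
 * walk a list with the first/next helpers instead of the iterator
 * macro below.
 *
 *	struct svc_rdma_chunk *chunk;
 *
 *	for (chunk = pcl_first_chunk(pcl); chunk;
 *	     chunk = pcl_next_chunk(pcl, chunk))
 *		handle_chunk(chunk);
 */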

/**
 * pcl_for_each_chunk - Iterate over chunks in a parsed chunk list
 * @pos: the loop cursor
 * @pcl: a parsed chunk list
 */
#define pcl_for_each_chunk(pos, pcl) \
	for (pos = list_first_entry(&(pcl)->cl_chunks, struct svc_rdma_chunk, ch_list); \
	     &pos->ch_list != &(pcl)->cl_chunks; \
	     pos = list_next_entry(pos, ch_list))
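
/*
 * Example (a sketch; "pcl" is a caller-provided list): sum the
 * lengths of every chunk.
 *
 *	struct svc_rdma_chunk *chunk;
 *	u32 total = 0;
 *
 *	pcl_for_each_chunk(chunk, pcl)
 *		total += chunk->ch_length;
 */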

/**
 * pcl_for_each_segment - Iterate over segments in a parsed chunk
 * @pos: the loop cursor
 * @chunk: a parsed chunk
 */
#define pcl_for_each_segment(pos, chunk) \
	for (pos = &(chunk)->ch_segments[0]; \
	     pos <= &(chunk)->ch_segments[(chunk)->ch_segcount - 1]; \
	     pos++)
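
/*
 * Example (a sketch; "pcl" and consume_segment() are hypothetical):
 * visit every segment of every chunk by nesting the two iterators.
 *
 *	struct svc_rdma_segment *segment;
 *	struct svc_rdma_chunk *chunk;
 *
 *	pcl_for_each_chunk(chunk, pcl)
 *		pcl_for_each_segment(segment, chunk)
 *			consume_segment(segment->rs_handle,
 *					segment->rs_offset,
 *					segment->rs_length);
 */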

/**
 * pcl_chunk_end_offset - Return offset of byte range following @chunk
 * @chunk: chunk in @pcl
 *
 * Returns starting offset of the region just after @chunk
 */
static inline unsigned int
pcl_chunk_end_offset(const struct svc_rdma_chunk *chunk)
{
	return xdr_align_size(chunk->ch_position + chunk->ch_payload_length);
}
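
/*
 * Worked example: a chunk at XDR position 0 carrying a 13-byte
 * payload ends at xdr_align_size(13) == 16, because XDR pads data
 * items to a 4-byte boundary.
 */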

struct svc_rdma_recv_ctxt;

extern void pcl_free(struct svc_rdma_pcl *pcl);
extern bool pcl_alloc_call(struct svc_rdma_recv_ctxt *rctxt, __be32 *p);
extern bool pcl_alloc_read(struct svc_rdma_recv_ctxt *rctxt, __be32 *p);
extern bool pcl_alloc_write(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_pcl *pcl, __be32 *p);
extern int pcl_process_nonpayloads(const struct svc_rdma_pcl *pcl,
				   const struct xdr_buf *xdr,
				   int (*actor)(const struct xdr_buf *,
						void *),
				   void *data);

#endif	/* SVC_RDMA_PCL_H */