/* drivers/bus/mhi/ep/ring.c - MHI endpoint ring management */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2022 Linaro Ltd.
  4. * Author: Manivannan Sadhasivam <[email protected]>
  5. */
  6. #include <linux/mhi_ep.h>
  7. #include "internal.h"
  8. size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
  9. {
  10. return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
  11. }
/*
 * Read the ring length (rlen) from the host-provided ring context and
 * convert it to a count of ring elements. The context lives in mapped
 * host memory, hence the memcpy_fromio() access on the __iomem pointer.
 */
static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
{
	__le64 rlen;

	memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));

	return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
}
  18. void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
  19. {
  20. ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
  21. }
/*
 * Copy ring elements from the host ring into the local ring_cache, from the
 * last cached position (ring->wr_offset) up to @end (exclusive), handling the
 * wrap-around case with two host reads.
 *
 * Returns 0 on success (or when nothing needs caching), otherwise a negative
 * error code propagated from the controller's read_from_host() callback.
 */
static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t start, copy_size;
	int ret;

	/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
	if (ring->type == RING_TYPE_ER)
		return 0;

	/* No need to cache the ring if write pointer is unmodified */
	if (ring->wr_offset == end)
		return 0;

	start = ring->wr_offset;
	if (start < end) {
		/* No wrap-around: one contiguous read covers the new elements */
		copy_size = (end - start) * sizeof(struct mhi_ring_element);
		ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
						(start * sizeof(struct mhi_ring_element)),
						&ring->ring_cache[start], copy_size);
		if (ret < 0)
			return ret;
	} else {
		/* Wrapped: first read from start to the physical end of the ring... */
		copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
		ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
						(start * sizeof(struct mhi_ring_element)),
						&ring->ring_cache[start], copy_size);
		if (ret < 0)
			return ret;

		/* ...then the remainder from the ring base, if any */
		if (end) {
			ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
							&ring->ring_cache[0],
							end * sizeof(struct mhi_ring_element));
			if (ret < 0)
				return ret;
		}
	}

	/* NOTE: on the wrapped path copy_size reflects only the first read */
	dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);

	return 0;
}
  60. static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
  61. {
  62. size_t wr_offset;
  63. int ret;
  64. wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
  65. /* Cache the host ring till write offset */
  66. ret = __mhi_ep_cache_ring(ring, wr_offset);
  67. if (ret)
  68. return ret;
  69. ring->wr_offset = wr_offset;
  70. return 0;
  71. }
  72. int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
  73. {
  74. u64 wr_ptr;
  75. wr_ptr = mhi_ep_mmio_get_db(ring);
  76. return mhi_ep_cache_ring(ring, wr_ptr);
  77. }
/* TODO: Support for adding multiple ring elements to the ring */
/*
 * Write one element to the host-side ring at the current read offset and
 * advance the read pointer (rp) in the ring context. The write offset is
 * refreshed from the host first so the free-space check uses the latest wp.
 *
 * Returns 0 on success, -ENOSPC if the ring is full, or a negative error
 * code propagated from the controller read/write callbacks.
 */
int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t old_offset = 0;
	u32 num_free_elem;
	__le64 rp;
	int ret;

	/* Pull the host's current write pointer before computing free space */
	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write pointer\n");
		return ret;
	}

	/* One slot is always kept unused to distinguish a full ring from an empty one */
	if (ring->rd_offset < ring->wr_offset)
		num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
	else
		num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;

	/* Check if there is space in ring for adding at least an element */
	if (!num_free_elem) {
		dev_err(dev, "No space left in the ring\n");
		return -ENOSPC;
	}

	/* Remember the slot being consumed, then advance the local read offset */
	old_offset = ring->rd_offset;

	mhi_ep_ring_inc_index(ring);

	dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);

	/* Update rp in ring context */
	rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
	memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));

	/* Copy the element itself into host memory at the slot just consumed */
	ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
				       sizeof(*el));
	if (ret < 0)
		return ret;

	return 0;
}
  113. void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
  114. {
  115. ring->type = type;
  116. if (ring->type == RING_TYPE_CMD) {
  117. ring->db_offset_h = EP_CRDB_HIGHER;
  118. ring->db_offset_l = EP_CRDB_LOWER;
  119. } else if (ring->type == RING_TYPE_CH) {
  120. ring->db_offset_h = CHDB_HIGHER_n(id);
  121. ring->db_offset_l = CHDB_LOWER_n(id);
  122. ring->ch_id = id;
  123. } else {
  124. ring->db_offset_h = ERDB_HIGHER_n(id);
  125. ring->db_offset_l = ERDB_LOWER_n(id);
  126. }
  127. }
  128. int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
  129. union mhi_ep_ring_ctx *ctx)
  130. {
  131. struct device *dev = &mhi_cntrl->mhi_dev->dev;
  132. __le64 val;
  133. int ret;
  134. ring->mhi_cntrl = mhi_cntrl;
  135. ring->ring_ctx = ctx;
  136. ring->ring_size = mhi_ep_ring_num_elems(ring);
  137. memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
  138. ring->rbase = le64_to_cpu(val);
  139. if (ring->type == RING_TYPE_CH)
  140. ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
  141. if (ring->type == RING_TYPE_ER)
  142. ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
  143. /* During ring init, both rp and wp are equal */
  144. memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
  145. ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
  146. ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
  147. /* Allocate ring cache memory for holding the copy of host ring */
  148. ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
  149. if (!ring->ring_cache)
  150. return -ENOMEM;
  151. memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
  152. ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
  153. if (ret) {
  154. dev_err(dev, "Failed to cache ring\n");
  155. kfree(ring->ring_cache);
  156. return ret;
  157. }
  158. ring->started = true;
  159. return 0;
  160. }
/*
 * Stop the ring and release the local copy of the host ring. Safe to call
 * even when the cache was never allocated (kfree(NULL) is a no-op); the
 * pointer is cleared to guard against double free on repeated resets.
 */
void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
{
	ring->started = false;
	kfree(ring->ring_cache);
	ring->ring_cache = NULL;
}