/* ccm.c */
  1. // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
  2. /* Copyright (C) 2016-2019 Netronome Systems, Inc. */
  3. #include <linux/bitops.h>
  4. #include "ccm.h"
  5. #include "nfp_app.h"
  6. #include "nfp_net.h"
/* Warn via the data path of the app's control vNIC. */
#define ccm_warn(app, msg...)	nn_dp_warn(&(app)->ctrl->dp, msg)

/* Cap on the number of tags allowed in flight at once; keeps a quarter of
 * the u16 tag space between allocation head and tail so a timed-out tag is
 * not reused immediately (see nfp_ccm_alloc_tag()).
 */
#define NFP_CCM_TAG_ALLOC_SPAN	(U16_MAX / 4)
  9. static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
  10. {
  11. u16 used_tags;
  12. used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;
  13. return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
  14. }
/* Allocate the next message tag, or -EAGAIN if too many are in flight.
 *
 * Uses the non-atomic __test_and_set_bit(), so the caller must hold the
 * control message lock (nfp_ccm_communicate() takes it before calling us).
 */
static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
{
	/* CCM is for FW communication which is request-reply.  To make sure
	 * we don't reuse the message ID too early after timeout - limit the
	 * number of requests in flight.
	 */
	if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
		ccm_warn(ccm->app, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	/* The bit should always be free here; WARN if the allocator state
	 * is inconsistent but carry on with the tag anyway.
	 */
	WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
	return ccm->tag_alloc_next++;
}
/* Return @tag to the allocator and advance the tail pointer past any
 * contiguous run of already-freed tags.
 *
 * Tags may be freed out of order; tag_alloc_last only moves forward over
 * bits that are clear, stopping at the first still-busy tag (or at
 * tag_alloc_next when everything is free).  Non-atomic bitop - caller
 * must hold the control message lock.
 */
static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));

	while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
	       ccm->tag_alloc_last != ccm->tag_alloc_next)
		ccm->tag_alloc_last++;
}
  35. static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
  36. {
  37. unsigned int msg_tag;
  38. struct sk_buff *skb;
  39. skb_queue_walk(&ccm->replies, skb) {
  40. msg_tag = nfp_ccm_get_tag(skb);
  41. if (msg_tag == tag) {
  42. nfp_ccm_free_tag(ccm, tag);
  43. __skb_unlink(skb, &ccm->replies);
  44. return skb;
  45. }
  46. }
  47. return NULL;
  48. }
  49. static struct sk_buff *
  50. nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
  51. {
  52. struct sk_buff *skb;
  53. nfp_ctrl_lock(app->ctrl);
  54. skb = __nfp_ccm_reply(ccm, tag);
  55. nfp_ctrl_unlock(app->ctrl);
  56. return skb;
  57. }
  58. static struct sk_buff *
  59. nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
  60. {
  61. struct sk_buff *skb;
  62. nfp_ctrl_lock(app->ctrl);
  63. skb = __nfp_ccm_reply(ccm, tag);
  64. if (!skb)
  65. nfp_ccm_free_tag(ccm, tag);
  66. nfp_ctrl_unlock(app->ctrl);
  67. return skb;
  68. }
  69. static struct sk_buff *
  70. nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
  71. enum nfp_ccm_type type, int tag)
  72. {
  73. struct sk_buff *skb;
  74. int i, err;
  75. for (i = 0; i < 50; i++) {
  76. udelay(4);
  77. skb = nfp_ccm_reply(ccm, app, tag);
  78. if (skb)
  79. return skb;
  80. }
  81. err = wait_event_interruptible_timeout(ccm->wq,
  82. skb = nfp_ccm_reply(ccm, app,
  83. tag),
  84. msecs_to_jiffies(5000));
  85. /* We didn't get a response - try last time and atomically drop
  86. * the tag even if no response is matched.
  87. */
  88. if (!skb)
  89. skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
  90. if (err < 0) {
  91. ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
  92. err == ERESTARTSYS ? "interrupted" : "error",
  93. type, err);
  94. return ERR_PTR(err);
  95. }
  96. if (!skb) {
  97. ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
  98. return ERR_PTR(-ETIMEDOUT);
  99. }
  100. return skb;
  101. }
/* nfp_ccm_communicate() - send a request to FW and wait for its reply.
 * @ccm:	control channel messaging state
 * @skb:	request skb; ownership is taken - freed on every error path
 * @type:	message type written into the CCM header
 * @reply_size:	expected reply length; 0 means caller validates the size
 *
 * Returns the reply skb (caller frees) or an ERR_PTR().  The tag is
 * allocated and the message transmitted under a single lock hold, so
 * presumably tx order matches tag order - NOTE(review): confirm against
 * __nfp_app_ctrl_tx() requirements.
 */
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
		    enum nfp_ccm_type type, unsigned int reply_size)
{
	struct nfp_app *app = ccm->app;
	struct nfp_ccm_hdr *hdr;
	int reply_type, tag;

	nfp_ctrl_lock(app->ctrl);
	tag = nfp_ccm_alloc_tag(ccm);
	if (tag < 0) {
		nfp_ctrl_unlock(app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	/* Fill in the CCM header at the front of the request. */
	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(app, skb);

	nfp_ctrl_unlock(app->ctrl);

	skb = nfp_ccm_wait_reply(ccm, app, type, tag);
	if (IS_ERR(skb))
		return skb;

	/* Reply must carry the reply-type counterpart of @type ... */
	reply_type = nfp_ccm_get_type(skb);
	if (reply_type != __NFP_CCM_REPLY(type)) {
		ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			 reply_type, __NFP_CCM_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			 type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}
/* nfp_ccm_rx() - receive path for CCM messages from FW.
 * @ccm:	control channel messaging state
 * @skb:	received message; ownership is taken - freed if dropped
 *
 * Queues the message on the reply list and wakes any waiter, dropping
 * messages that are too short or whose tag has no outstanding request.
 */
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
{
	struct nfp_app *app = ccm->app;
	unsigned int tag;

	/* Must at least contain a full CCM header to read the tag. */
	if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
		ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(app->ctrl);

	/* Only accept replies whose tag is still marked allocated;
	 * anything else is unsolicited or arrived after tag release.
	 */
	tag = nfp_ccm_get_tag(skb);
	if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
		ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
			 tag);
		goto err_unlock;
	}

	__skb_queue_tail(&ccm->replies, skb);
	wake_up_interruptible_all(&ccm->wq);

	nfp_ctrl_unlock(app->ctrl);
	return;

err_unlock:
	nfp_ctrl_unlock(app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}
  166. int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
  167. {
  168. ccm->app = app;
  169. skb_queue_head_init(&ccm->replies);
  170. init_waitqueue_head(&ccm->wq);
  171. return 0;
  172. }
/* Tear down CCM state; by now every queued reply should have been
 * consumed by a waiter - warn if any are left (they would leak).
 */
void nfp_ccm_clean(struct nfp_ccm *ccm)
{
	WARN_ON(!skb_queue_empty(&ccm->replies));
}