stream_sched.c 6.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* SCTP kernel implementation
  3. * (C) Copyright Red Hat Inc. 2017
  4. *
  5. * This file is part of the SCTP kernel implementation
  6. *
  7. * These functions manipulate sctp stream queue/scheduling.
  8. *
  9. * Please send any bug reports or fixes you make to the
 * email address(es):
  11. * lksctp developers <[email protected]>
  12. *
  13. * Written or modified by:
  14. * Marcelo Ricardo Leitner <[email protected]>
  15. */
  16. #include <linux/list.h>
  17. #include <net/sctp/sctp.h>
  18. #include <net/sctp/sm.h>
  19. #include <net/sctp/stream_sched.h>
  20. /* First Come First Serve (a.k.a. FIFO)
  21. * RFC DRAFT ndata Section 3.1
  22. */
  23. static int sctp_sched_fcfs_set(struct sctp_stream *stream, __u16 sid,
  24. __u16 value, gfp_t gfp)
  25. {
  26. return 0;
  27. }
  28. static int sctp_sched_fcfs_get(struct sctp_stream *stream, __u16 sid,
  29. __u16 *value)
  30. {
  31. *value = 0;
  32. return 0;
  33. }
  34. static int sctp_sched_fcfs_init(struct sctp_stream *stream)
  35. {
  36. return 0;
  37. }
  38. static int sctp_sched_fcfs_init_sid(struct sctp_stream *stream, __u16 sid,
  39. gfp_t gfp)
  40. {
  41. return 0;
  42. }
  43. static void sctp_sched_fcfs_free_sid(struct sctp_stream *stream, __u16 sid)
  44. {
  45. }
  46. static void sctp_sched_fcfs_free(struct sctp_stream *stream)
  47. {
  48. }
  49. static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
  50. struct sctp_datamsg *msg)
  51. {
  52. }
  53. static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
  54. {
  55. struct sctp_stream *stream = &q->asoc->stream;
  56. struct sctp_chunk *ch = NULL;
  57. struct list_head *entry;
  58. if (list_empty(&q->out_chunk_list))
  59. goto out;
  60. if (stream->out_curr) {
  61. ch = list_entry(stream->out_curr->ext->outq.next,
  62. struct sctp_chunk, stream_list);
  63. } else {
  64. entry = q->out_chunk_list.next;
  65. ch = list_entry(entry, struct sctp_chunk, list);
  66. }
  67. sctp_sched_dequeue_common(q, ch);
  68. out:
  69. return ch;
  70. }
  71. static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
  72. struct sctp_chunk *chunk)
  73. {
  74. }
  75. static void sctp_sched_fcfs_sched_all(struct sctp_stream *stream)
  76. {
  77. }
  78. static void sctp_sched_fcfs_unsched_all(struct sctp_stream *stream)
  79. {
  80. }
/* Operations table for the FCFS scheduler; registered under SCTP_SS_FCFS
 * by sctp_sched_ops_fcfs_init().
 */
static struct sctp_sched_ops sctp_sched_fcfs = {
	.set = sctp_sched_fcfs_set,
	.get = sctp_sched_fcfs_get,
	.init = sctp_sched_fcfs_init,
	.init_sid = sctp_sched_fcfs_init_sid,
	.free_sid = sctp_sched_fcfs_free_sid,
	.free = sctp_sched_fcfs_free,
	.enqueue = sctp_sched_fcfs_enqueue,
	.dequeue = sctp_sched_fcfs_dequeue,
	.dequeue_done = sctp_sched_fcfs_dequeue_done,
	.sched_all = sctp_sched_fcfs_sched_all,
	.unsched_all = sctp_sched_fcfs_unsched_all,
};
/* Register the FCFS scheduler in the global sctp_sched_ops[] table. */
static void sctp_sched_ops_fcfs_init(void)
{
	sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
}
/* API to other parts of the stack */

/* Registered schedulers, indexed by enum sctp_sched_type (0..SCTP_SS_MAX). */
static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
/* Install @sched_ops in the scheduler table under identifier @sched.
 * Callers pass a valid enum sctp_sched_type; no bounds check is done here.
 */
void sctp_sched_ops_register(enum sctp_sched_type sched,
			     struct sctp_sched_ops *sched_ops)
{
	sctp_sched_ops[sched] = sched_ops;
}
/* Register all built-in stream schedulers (FCFS, priority, round-robin). */
void sctp_sched_ops_init(void)
{
	sctp_sched_ops_fcfs_init();
	sctp_sched_ops_prio_init();
	sctp_sched_ops_rr_init();
}
  111. int sctp_sched_set_sched(struct sctp_association *asoc,
  112. enum sctp_sched_type sched)
  113. {
  114. struct sctp_sched_ops *n = sctp_sched_ops[sched];
  115. struct sctp_sched_ops *old = asoc->outqueue.sched;
  116. struct sctp_datamsg *msg = NULL;
  117. struct sctp_chunk *ch;
  118. int i, ret = 0;
  119. if (old == n)
  120. return ret;
  121. if (sched > SCTP_SS_MAX)
  122. return -EINVAL;
  123. if (old) {
  124. old->free(&asoc->stream);
  125. /* Give the next scheduler a clean slate. */
  126. for (i = 0; i < asoc->stream.outcnt; i++) {
  127. struct sctp_stream_out_ext *ext = SCTP_SO(&asoc->stream, i)->ext;
  128. if (!ext)
  129. continue;
  130. memset_after(ext, 0, outq);
  131. }
  132. }
  133. asoc->outqueue.sched = n;
  134. n->init(&asoc->stream);
  135. for (i = 0; i < asoc->stream.outcnt; i++) {
  136. if (!SCTP_SO(&asoc->stream, i)->ext)
  137. continue;
  138. ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
  139. if (ret)
  140. goto err;
  141. }
  142. /* We have to requeue all chunks already queued. */
  143. list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) {
  144. if (ch->msg == msg)
  145. continue;
  146. msg = ch->msg;
  147. n->enqueue(&asoc->outqueue, msg);
  148. }
  149. return ret;
  150. err:
  151. n->free(&asoc->stream);
  152. asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */
  153. return ret;
  154. }
  155. int sctp_sched_get_sched(struct sctp_association *asoc)
  156. {
  157. int i;
  158. for (i = 0; i <= SCTP_SS_MAX; i++)
  159. if (asoc->outqueue.sched == sctp_sched_ops[i])
  160. return i;
  161. return 0;
  162. }
  163. int sctp_sched_set_value(struct sctp_association *asoc, __u16 sid,
  164. __u16 value, gfp_t gfp)
  165. {
  166. if (sid >= asoc->stream.outcnt)
  167. return -EINVAL;
  168. if (!SCTP_SO(&asoc->stream, sid)->ext) {
  169. int ret;
  170. ret = sctp_stream_init_ext(&asoc->stream, sid);
  171. if (ret)
  172. return ret;
  173. }
  174. return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
  175. }
  176. int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
  177. __u16 *value)
  178. {
  179. if (sid >= asoc->stream.outcnt)
  180. return -EINVAL;
  181. if (!SCTP_SO(&asoc->stream, sid)->ext)
  182. return 0;
  183. return asoc->outqueue.sched->get(&asoc->stream, sid, value);
  184. }
  185. void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
  186. {
  187. if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
  188. !q->asoc->peer.intl_capable) {
  189. struct sctp_stream_out *sout;
  190. __u16 sid;
  191. /* datamsg is not finish, so save it as current one,
  192. * in case application switch scheduler or a higher
  193. * priority stream comes in.
  194. */
  195. sid = sctp_chunk_stream_no(ch);
  196. sout = SCTP_SO(&q->asoc->stream, sid);
  197. q->asoc->stream.out_curr = sout;
  198. return;
  199. }
  200. q->asoc->stream.out_curr = NULL;
  201. q->sched->dequeue_done(q, ch);
  202. }
  203. /* Auxiliary functions for the schedulers */
  204. void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
  205. {
  206. list_del_init(&ch->list);
  207. list_del_init(&ch->stream_list);
  208. q->out_qlen -= ch->skb->len;
  209. }
  210. int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
  211. {
  212. struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
  213. struct sctp_stream_out_ext *ext = SCTP_SO(stream, sid)->ext;
  214. INIT_LIST_HEAD(&ext->outq);
  215. return sched->init_sid(stream, sid, gfp);
  216. }
  217. struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream)
  218. {
  219. struct sctp_association *asoc;
  220. asoc = container_of(stream, struct sctp_association, stream);
  221. return asoc->outqueue.sched;
  222. }