fq_impl.h 7.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2016 Qualcomm Atheros, Inc
  4. *
  5. * Based on net/sched/sch_fq_codel.c
  6. */
  7. #ifndef __NET_SCHED_FQ_IMPL_H
  8. #define __NET_SCHED_FQ_IMPL_H
  9. #include <net/fq.h>
  10. /* functions that are embedded into includer */
  11. static void
  12. __fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets,
  13. unsigned int bytes, unsigned int truesize)
  14. {
  15. struct fq_tin *tin = flow->tin;
  16. int idx;
  17. tin->backlog_bytes -= bytes;
  18. tin->backlog_packets -= packets;
  19. flow->backlog -= bytes;
  20. fq->backlog -= packets;
  21. fq->memory_usage -= truesize;
  22. if (flow->backlog)
  23. return;
  24. if (flow == &tin->default_flow) {
  25. list_del_init(&tin->tin_list);
  26. return;
  27. }
  28. idx = flow - fq->flows;
  29. __clear_bit(idx, fq->flows_bitmap);
  30. }
  31. static void fq_adjust_removal(struct fq *fq,
  32. struct fq_flow *flow,
  33. struct sk_buff *skb)
  34. {
  35. __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);
  36. }
  37. static struct sk_buff *fq_flow_dequeue(struct fq *fq,
  38. struct fq_flow *flow)
  39. {
  40. struct sk_buff *skb;
  41. lockdep_assert_held(&fq->lock);
  42. skb = __skb_dequeue(&flow->queue);
  43. if (!skb)
  44. return NULL;
  45. fq_adjust_removal(fq, flow, skb);
  46. return skb;
  47. }
/* Drop a batch of packets from the head of @flow, freeing each one via
 * @free_func and batching the accounting into one __fq_adjust_removal()
 * call.  Returns the number of packets dropped.  Caller holds fq->lock.
 */
static int fq_flow_drop(struct fq *fq, struct fq_flow *flow,
			fq_skb_free_t free_func)
{
	unsigned int packets = 0, bytes = 0, truesize = 0;
	struct fq_tin *tin = flow->tin;
	struct sk_buff *skb;
	int pending;

	lockdep_assert_held(&fq->lock);

	/* Drop at most 32 packets, but never more than half of the queue. */
	pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2);
	/* do-while: at least one packet is dropped even when pending is 0
	 * (queue length < 2), so callers always make forward progress.
	 */
	do {
		skb = __skb_dequeue(&flow->queue);
		if (!skb)
			break;
		packets++;
		bytes += skb->len;
		truesize += skb->truesize;
		free_func(fq, tin, flow, skb);
	} while (packets < pending);

	__fq_adjust_removal(fq, flow, packets, bytes, truesize);

	return packets;
}
/* Deficit round-robin dequeue for @tin: service new_flows first, falling
 * back to old_flows, pulling the actual packet through @dequeue_func.
 * Returns NULL when the tin has nothing queued.  Caller holds fq->lock.
 */
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
				      struct fq_tin *tin,
				      fq_tin_dequeue_t dequeue_func)
{
	struct fq_flow *flow;
	struct list_head *head;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

begin:
	head = &tin->new_flows;
	if (list_empty(head)) {
		head = &tin->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_flow, flowchain);

	if (flow->deficit <= 0) {
		/* Quantum exhausted: replenish and rotate the flow to the
		 * back of old_flows, then try the next candidate.
		 */
		flow->deficit += fq->quantum;
		list_move_tail(&flow->flowchain,
			       &tin->old_flows);
		goto begin;
	}

	skb = dequeue_func(fq, tin, flow);
	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &tin->new_flows) &&
		    !list_empty(&tin->old_flows)) {
			list_move_tail(&flow->flowchain, &tin->old_flows);
		} else {
			/* Empty flow: unschedule it and unbind it from the
			 * tin so the hash bucket can be reused.
			 */
			list_del_init(&flow->flowchain);
			flow->tin = NULL;
		}
		goto begin;
	}

	/* Charge the flow's deficit and update tin tx statistics. */
	flow->deficit -= skb->len;
	tin->tx_bytes += skb->len;
	tin->tx_packets++;

	return skb;
}
  108. static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
  109. {
  110. u32 hash = skb_get_hash(skb);
  111. return reciprocal_scale(hash, fq->flows_cnt);
  112. }
  113. static struct fq_flow *fq_flow_classify(struct fq *fq,
  114. struct fq_tin *tin, u32 idx,
  115. struct sk_buff *skb)
  116. {
  117. struct fq_flow *flow;
  118. lockdep_assert_held(&fq->lock);
  119. flow = &fq->flows[idx];
  120. if (flow->tin && flow->tin != tin) {
  121. flow = &tin->default_flow;
  122. tin->collisions++;
  123. fq->collisions++;
  124. }
  125. if (!flow->tin)
  126. tin->flows++;
  127. return flow;
  128. }
  129. static struct fq_flow *fq_find_fattest_flow(struct fq *fq)
  130. {
  131. struct fq_tin *tin;
  132. struct fq_flow *flow = NULL;
  133. u32 len = 0;
  134. int i;
  135. for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) {
  136. struct fq_flow *cur = &fq->flows[i];
  137. unsigned int cur_len;
  138. cur_len = cur->backlog;
  139. if (cur_len <= len)
  140. continue;
  141. flow = cur;
  142. len = cur_len;
  143. }
  144. list_for_each_entry(tin, &fq->tin_backlog, tin_list) {
  145. unsigned int cur_len = tin->default_flow.backlog;
  146. if (cur_len <= len)
  147. continue;
  148. flow = &tin->default_flow;
  149. len = cur_len;
  150. }
  151. return flow;
  152. }
/* Enqueue @skb on the flow selected by @idx within @tin, then enforce
 * the packet-count and memory limits by repeatedly dropping from the
 * fattest flow.  Dropped packets are released via @free_func.
 * Caller holds fq->lock.
 */
static void fq_tin_enqueue(struct fq *fq,
			   struct fq_tin *tin, u32 idx,
			   struct sk_buff *skb,
			   fq_skb_free_t free_func)
{
	struct fq_flow *flow;
	bool oom;

	lockdep_assert_held(&fq->lock);

	flow = fq_flow_classify(fq, tin, idx, skb);

	if (!flow->backlog) {
		/* Flow becomes backlogged: register it for
		 * fq_find_fattest_flow() — hashed flows in the bitmap,
		 * the default flow via the fq-wide tin backlog list.
		 */
		if (flow != &tin->default_flow)
			__set_bit(idx, fq->flows_bitmap);
		else if (list_empty(&tin->tin_list))
			list_add(&tin->tin_list, &fq->tin_backlog);
	}

	flow->tin = tin;
	flow->backlog += skb->len;
	tin->backlog_bytes += skb->len;
	tin->backlog_packets++;
	fq->memory_usage += skb->truesize;
	fq->backlog++;

	if (list_empty(&flow->flowchain)) {
		/* Newly scheduled flow: full quantum, serviced first. */
		flow->deficit = fq->quantum;
		list_add_tail(&flow->flowchain,
			      &tin->new_flows);
	}

	__skb_queue_tail(&flow->queue, skb);
	oom = (fq->memory_usage > fq->memory_limit);
	while (fq->backlog > fq->limit || oom) {
		flow = fq_find_fattest_flow(fq);
		if (!flow)
			return;

		if (!fq_flow_drop(fq, flow, free_func))
			return;

		flow->tin->overlimit++;
		fq->overlimit++;
		if (oom) {
			fq->overmemory++;
			/* Re-check: the drop may have freed enough memory. */
			oom = (fq->memory_usage > fq->memory_limit);
		}
	}
}
  195. static void fq_flow_filter(struct fq *fq,
  196. struct fq_flow *flow,
  197. fq_skb_filter_t filter_func,
  198. void *filter_data,
  199. fq_skb_free_t free_func)
  200. {
  201. struct fq_tin *tin = flow->tin;
  202. struct sk_buff *skb, *tmp;
  203. lockdep_assert_held(&fq->lock);
  204. skb_queue_walk_safe(&flow->queue, skb, tmp) {
  205. if (!filter_func(fq, tin, flow, skb, filter_data))
  206. continue;
  207. __skb_unlink(skb, &flow->queue);
  208. fq_adjust_removal(fq, flow, skb);
  209. free_func(fq, tin, flow, skb);
  210. }
  211. }
  212. static void fq_tin_filter(struct fq *fq,
  213. struct fq_tin *tin,
  214. fq_skb_filter_t filter_func,
  215. void *filter_data,
  216. fq_skb_free_t free_func)
  217. {
  218. struct fq_flow *flow;
  219. lockdep_assert_held(&fq->lock);
  220. list_for_each_entry(flow, &tin->new_flows, flowchain)
  221. fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
  222. list_for_each_entry(flow, &tin->old_flows, flowchain)
  223. fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
  224. }
/* Free every packet queued on @flow via @free_func and detach the flow
 * from its tin's scheduling lists.  The flow must end up with a zero
 * backlog (warns otherwise) and is unbound from the tin.
 */
static void fq_flow_reset(struct fq *fq,
			  struct fq_flow *flow,
			  fq_skb_free_t free_func)
{
	struct fq_tin *tin = flow->tin;
	struct sk_buff *skb;

	while ((skb = fq_flow_dequeue(fq, flow)))
		free_func(fq, tin, flow, skb);

	if (!list_empty(&flow->flowchain)) {
		list_del_init(&flow->flowchain);
		/* Last scheduled flow of the tin: remove the tin from the
		 * fq-wide backlog list too.
		 */
		if (list_empty(&tin->new_flows) &&
		    list_empty(&tin->old_flows))
			list_del_init(&tin->tin_list);
	}

	flow->tin = NULL;

	WARN_ON_ONCE(flow->backlog);
}
  242. static void fq_tin_reset(struct fq *fq,
  243. struct fq_tin *tin,
  244. fq_skb_free_t free_func)
  245. {
  246. struct list_head *head;
  247. struct fq_flow *flow;
  248. for (;;) {
  249. head = &tin->new_flows;
  250. if (list_empty(head)) {
  251. head = &tin->old_flows;
  252. if (list_empty(head))
  253. break;
  254. }
  255. flow = list_first_entry(head, struct fq_flow, flowchain);
  256. fq_flow_reset(fq, flow, free_func);
  257. }
  258. WARN_ON_ONCE(!list_empty(&tin->tin_list));
  259. WARN_ON_ONCE(tin->backlog_bytes);
  260. WARN_ON_ONCE(tin->backlog_packets);
  261. }
  262. static void fq_flow_init(struct fq_flow *flow)
  263. {
  264. INIT_LIST_HEAD(&flow->flowchain);
  265. __skb_queue_head_init(&flow->queue);
  266. }
  267. static void fq_tin_init(struct fq_tin *tin)
  268. {
  269. INIT_LIST_HEAD(&tin->new_flows);
  270. INIT_LIST_HEAD(&tin->old_flows);
  271. INIT_LIST_HEAD(&tin->tin_list);
  272. fq_flow_init(&tin->default_flow);
  273. }
  274. static int fq_init(struct fq *fq, int flows_cnt)
  275. {
  276. int i;
  277. memset(fq, 0, sizeof(fq[0]));
  278. spin_lock_init(&fq->lock);
  279. INIT_LIST_HEAD(&fq->tin_backlog);
  280. fq->flows_cnt = max_t(u32, flows_cnt, 1);
  281. fq->quantum = 300;
  282. fq->limit = 8192;
  283. fq->memory_limit = 16 << 20; /* 16 MBytes */
  284. fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
  285. if (!fq->flows)
  286. return -ENOMEM;
  287. fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL);
  288. if (!fq->flows_bitmap) {
  289. kvfree(fq->flows);
  290. fq->flows = NULL;
  291. return -ENOMEM;
  292. }
  293. for (i = 0; i < fq->flows_cnt; i++)
  294. fq_flow_init(&fq->flows[i]);
  295. return 0;
  296. }
  297. static void fq_reset(struct fq *fq,
  298. fq_skb_free_t free_func)
  299. {
  300. int i;
  301. for (i = 0; i < fq->flows_cnt; i++)
  302. fq_flow_reset(fq, &fq->flows[i], free_func);
  303. kvfree(fq->flows);
  304. fq->flows = NULL;
  305. bitmap_free(fq->flows_bitmap);
  306. fq->flows_bitmap = NULL;
  307. }
  308. #endif