// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Felix Fietkau <[email protected]>
 */
#include "mt76.h"
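
/* Reorder-release timeout for a TID, in jiffies: HZ / 10 (100 ms) for the
 * AC_BE/AC_BK TIDs 0-3, HZ / 25 (40 ms) for the latency-sensitive TIDs >= 4.
 */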
static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
{
	/* Currently voice traffic (AC_VO) always runs without aggregation,
	 * so it needs no special handling. AC_BE/AC_BK use TIDs 0-3; just
	 * check for non-AC_BK/AC_BE TIDs and use a shorter timeout for them.
	 */
	return HZ / (tidno >= 4 ? 25 : 10);
}
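
/* Release the frame (if any) in reorder slot @idx into @frames and advance
 * the window head by one sequence number. Empty slots still move the head,
 * which is how holes in the sequence space are skipped.
 */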
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}
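
/* Move the window start forward to @head, releasing every buffered frame
 * with a sequence number before it.
 */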
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}
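
/* Flush the run of consecutively filled slots at the head of the window,
 * i.e. deliver all frames that are now in order.
 */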
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}
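
/* Timeout scan: walk the reorder buffer once and, for every frame that has
 * been held longer than the TID's reorder timeout, advance the window past
 * all frames preceding it; the final release_head flush then delivers the
 * expired frame together with anything now contiguous behind it.
 */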
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after32(jiffies,
				  status->reorder_time +
				  mt76_aggr_tid_to_timeo(tid->num)))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}
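
/* Delayed work handler: flush frames whose reorder timeout has expired,
 * hand them to mt76_rx_complete(), and re-arm the work while any frames
 * remain buffered.
 */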
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     mt76_aggr_tid_to_timeo(tid->num));

	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}
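
/* Handle control frames: if @skb is a Block Ack Request (BAR), move the
 * receive window of the matching TID up to the BAR's starting sequence
 * number and release everything before it.
 */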
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u8 tidno;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	if (!tid->stopped) {
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}
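
/* Entry point from the RX path: feed one received frame into the reorder
 * machinery for its TID. The frame is queued to @frames up front and
 * unlinked again if it has to be buffered or dropped; everything left on
 * @frames is passed on for in-order delivery by the caller.
 */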
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size, idx;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	u8 ackp;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr) {
		if (!(status->flag & RX_FLAG_8023))
			mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
		return;

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;
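
	/* The reordering below takes care of duplicate detection, so
	 * mac80211 does not need to validate it again.
	 */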
	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);
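
	/* Until the first frame at or past the starting sequence number
	 * arrives, pass earlier frames through unreordered rather than
	 * dropping them as duplicates.
	 */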
	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}
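
	/* Frame precedes the window head: a duplicate or a stale
	 * retransmission, drop it.
	 */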
	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}
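
	/* In-order frame: advance the head and flush any buffered frames
	 * that have now become contiguous.
	 */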
	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     mt76_aggr_tid_to_timeo(tid->num));

out:
	spin_unlock_bh(&tid->lock);
}
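
/* Set up RX reordering for @tidno of @wcid: allocate a reorder buffer of
 * @size entries starting at sequence number @ssn and publish it via RCU.
 * Any previous session on the same TID is torn down first.
 */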
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u16 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	tid->num = tidno;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
  210. EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
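
/* Mark the session as stopped under the lock, free all buffered frames,
 * then wait for a concurrently running reorder work to finish.
 */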
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u16 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->reorder_buf[i] = NULL;
		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);

	cancel_delayed_work_sync(&tid->reorder_work);
}
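
/* Tear down the RX reorder session on @tidno, if any: unpublish the RCU
 * pointer, drain the reorder buffer and free the state after a grace
 * period.
 */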
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid = NULL;

	tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
				  lockdep_is_held(&dev->mutex));
	if (tid) {
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);