// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP Wireless LAN device driver: WMM
 *
 * Copyright 2011-2020 NXP
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"

/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX		512

#define WMM_QUEUED_PACKET_LOWER_LIMIT	180
#define WMM_QUEUED_PACKET_UPPER_LIMIT	200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5
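
/*
 * When true, Tx A-MSDU aggregation is disabled for every TID even when the
 * adapter's Tx buffer size would otherwise allow it; see mwifiex_wmm_init().
 */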
static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);

/* This table inverts the tos_to_tid operation to get a priority
 * which is in sequential order, and can be compared.
 * Use this to compare the priority of two different TIDs.
 */
const u8 tos_to_tid_inv[] = {
	0x02,	/* from tos_to_tid[2] = 0 */
	0x00,	/* from tos_to_tid[0] = 1 */
	0x01,	/* from tos_to_tid[1] = 2 */
	0x03,
	0x04,
	0x05,
	0x06,
	0x07
};

/*
 * WMM information IE: vendor-specific element of length 7 carrying the
 * Microsoft OUI (00:50:f2), OUI type 2 (WMM), OUI subtype 0 (information
 * element), WMM version 1 and a zeroed QoS info field.
 */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
				  0x00, 0x50, 0xf2, 0x02,
				  0x00, 0x01, 0x00
};

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
					  WMM_AC_BK,
					  WMM_AC_VI,
					  WMM_AC_VO
};

static u8 tos_to_tid[] = {
	/* TID	DSCP_P2	DSCP_P1	DSCP_P0	WMM_AC */
	0x01,	/* 0	1	0	AC_BK */
	0x02,	/* 0	0	0	AC_BK */
	0x00,	/* 0	0	1	AC_BE */
	0x03,	/* 0	1	1	AC_BE */
	0x04,	/* 1	0	0	AC_VI */
	0x05,	/* 1	0	1	AC_VI */
	0x06,	/* 1	1	0	AC_VO */
	0x07	/* 1	1	1	AC_VO */
};
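
/*
 * Each WMM AC owns a pair of TIDs, indexed by AC number:
 * AC_BK -> {1, 2}, AC_BE -> {0, 3}, AC_VI -> {4, 5}, AC_VO -> {6, 7}.
 */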
static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };

/*
 * This function debug prints the priority parameters for a WMM AC.
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}

/*
 * This function allocates a route address list.
 *
 * The function also initializes the list with the provided RA.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}

/* This function returns a random number between 16 and 32 to be used as
 * the threshold for the number of packets after which BA setup is initiated.
 */
static u8 mwifiex_get_random_ba_threshold(void)
{
	u64 ns;
	/* set up ba_packet_threshold here: a random number in the range
	 * [BA_SETUP_PACKET_OFFSET,
	 *  BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1]
	 */
	ns = ktime_get_ns();
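	/* fold the upper timestamp bits into the low byte so the modulo
	 * below sees more than just the fastest-changing bits
	 */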
	ns += (ns >> 32) + (ns >> 16);

	return ((u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD) + BA_SETUP_PACKET_OFFSET;
}

/*
 * This function allocates and adds a RA list for all TIDs
 * with the given RA.
 */
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		mwifiex_dbg(adapter, INFO,
			    "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		ra_list->tdls_link = false;
		ra_list->ba_status = BA_SETUP_NONE;
		ra_list->amsdu_in_ampdu = false;
		if (!mwifiex_queuing_ra_based(priv)) {
			if (mwifiex_is_tdls_link_setup
			    (mwifiex_get_tdls_link_status(priv, ra))) {
				ra_list->tdls_link = true;
				ra_list->is_11n_enabled =
					mwifiex_tdls_peer_11n_enabled(priv, ra);
			} else {
				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
			}
		} else {
			spin_lock_bh(&priv->sta_list_spinlock);
			node = mwifiex_get_sta_entry(priv, ra);
			if (node)
				ra_list->tx_paused = node->tx_pause;
			ra_list->is_11n_enabled =
				mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
			spin_unlock_bh(&priv->sta_list_spinlock);
		}

		mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
			    ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
					      mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

/*
 * This function sets the WMM queue priorities to their default values.
 */
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}

/*
 * This function maps ACs to TIDs.
 */
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
	struct mwifiex_wmm_desc *wmm = &priv->wmm;
	u8 *queue_priority = wmm->queue_priority;
	int i;
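
	/* Rewrite tos_to_tid so the two TIDs of the highest priority AC
	 * (queue_priority[0]) land in slots 7 and 6, the next AC in slots
	 * 5 and 4, and so on, then rebuild the inverse table.
	 */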
	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function initializes WMM priority queues.
 */
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM Parameter IE: version=%d,\t"
		    "qos_info Parameter Set Count=%d, Reserved=%#x\n",
		    wmm_ie->version, wmm_ie->qos_info_bitmap &
		    IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		    wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		mwifiex_dbg(priv->adapter, INFO,
			    "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			    (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			    cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble sort: order the ACs by increasing average back-off; on a
	 * tie, place the AC with the higher queue priority value first.
	 */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(priv);
}

/*
 * This function evaluates whether or not an AC is to be downgraded.
 *
 * If the given AC is disabled, the highest enabled AC that does not
 * require admission control is returned instead.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
			      enum mwifiex_wmm_ac_e eval_ac)
{
	int down_ac;
	enum mwifiex_wmm_ac_e ret_ac;
	struct mwifiex_wmm_ac_status *ac_status;

	ac_status = &priv->wmm.ac_status[eval_ac];

	if (!ac_status->disabled)
		/* Okay to use this AC, it's enabled */
		return eval_ac;

	/* Set up a default return value of the lowest priority */
	ret_ac = WMM_AC_BK;

	/*
	 * Find the highest AC that is enabled and does not require
	 * admission control. The spec disallows downgrading to an AC,
	 * which is enabled due to a completed admission control.
	 * Unadmitted traffic is not to be sent on an AC with admitted
	 * traffic.
	 */
	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
		ac_status = &priv->wmm.ac_status[down_ac];

		if (!ac_status->disabled && !ac_status->flow_required)
			/* AC is enabled and does not require admission
			   control */
			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
	}

	return ret_ac;
}

/*
 * This function downgrades the WMM priority queues.
 */
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
		    "BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			mwifiex_dbg(priv->adapter, INFO,
				    "info: WMM: AC PRIO %d maps to %d\n",
				    ac_val,
				    priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}

/*
 * This function converts the IP TOS field to a WMM AC
 * queue assignment.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	static const enum mwifiex_wmm_ac_e tos_to_ac[] = {
		WMM_AC_BE,
		WMM_AC_BK,
		WMM_AC_BK,
		WMM_AC_BE,
		WMM_AC_VI,
		WMM_AC_VI,
		WMM_AC_VO,
		WMM_AC_VO
	};

	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}

/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the WMM Parameter IE received from the AP indicates that the
 * corresponding AC is disabled (due to call admission control, i.e.
 * the ACM bit). Mapping of TID to AC is taken care of internally.
 */
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Pick one of the two TIDs owned by the downgraded AC, keeping the
	 * low bit of the original priority; the dequeuing function takes
	 * care of selecting packets from the resulting queue.
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}

/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			if (!disable_tx_amsdu &&
			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
				priv->aggr_prio_tbl[i].amsdu =
							priv->tos_to_tid_inv[i];
			else
				priv->aggr_prio_tbl[i].amsdu =
							BA_STREAM_NOT_ALLOWED;
			priv->aggr_prio_tbl[i].ampdu_ap =
							priv->tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user =
							priv->tos_to_tid_inv[i];
		}
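
		/* Aggregation is never initiated on the two highest
		 * priority TIDs (6 and 7).
		 */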
		priv->aggr_prio_tbl[6].amsdu
					= priv->aggr_prio_tbl[6].ampdu_ap
					= priv->aggr_prio_tbl[6].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
					     = priv->aggr_prio_tbl[7].ampdu_user
					     = BA_STREAM_NOT_ALLOWED;

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}

int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
{
	struct mwifiex_private *priv;
	int i;

	for (i = 0; i < adapter->priv_num; i++) {
		priv = adapter->priv[i];
		if (!priv)
			continue;
		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;
		if (!skb_queue_empty(&priv->bypass_txq))
			return false;
	}

	return true;
}

/*
 * This function checks if the WMM Tx queues are empty.
 */
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];
		if (!priv)
			continue;
		if (!priv->port_open &&
		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
			continue;
		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;
		if (atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}

/*
 * This function deletes all packets in an RA list node.
 *
 * The packet send completion callback handlers are called with status
 * failure, after the packets are dequeued, to ensure proper cleanup.
 * The RA list node itself is not freed here.
 */
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
		skb_unlink(skb, &ra_list->skb_head);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
	}
}

/*
 * This function deletes all packets in an RA list.
 *
 * The packets in each node of the RA list are deleted individually;
 * the RA list nodes themselves are freed elsewhere.
 */
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}

/*
 * This function deletes all packets in all RA lists.
 */
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv,
					       &priv->wmm.tid_tbl_ptr[i].ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function deletes all route addresses from all RA lists.
 */
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		mwifiex_dbg(priv->adapter, INFO,
			    "info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

static int mwifiex_free_ack_frame(int id, void *p, void *data)
{
	pr_warn("Have pending ack frames!\n");
	kfree_skb(p);
	return 0;
}

/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 *	- All packets in RA lists
 *	- All entries in Rx reorder table
 *	- All entries in Tx BA stream table
 *	- MPA buffer (if required)
 *	- All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	struct sk_buff *skb, *tmp;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
		skb_unlink(skb, &priv->tdls_txq);
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
	}

	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
		skb_unlink(skb, &priv->bypass_txq);
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
	}
	atomic_set(&priv->adapter->bypass_tx_pending, 0);

	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
	idr_destroy(&priv->ack_status_frames);
}

/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}

void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
				    u8 tx_pause)
{
	struct mwifiex_ra_list_tbl *ra_list;
	u32 pkt_cnt = 0, tx_pkts_queued;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
		if (ra_list && ra_list->tx_paused != tx_pause) {
			pkt_cnt += ra_list->total_pkt_count;
			ra_list->tx_paused = tx_pause;
			if (tx_pause)
				priv->wmm.pkts_paused[i] +=
					ra_list->total_pkt_count;
			else
				priv->wmm.pkts_paused[i] -=
					ra_list->total_pkt_count;
		}
	}
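
	/* move the affected packets between the queued and the paused
	 * counters, and rescan priorities from the top on the next dequeue
	 */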
	if (pkt_cnt) {
		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
		if (tx_pause)
			tx_pkts_queued -= pkt_cnt;
		else
			tx_pkts_queued += pkt_cnt;

		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/* This function updates the tx_pause state of all non-TDLS peer RA lists
 * while TDLS channel switching is in progress.
 */
void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
					       u8 *mac, u8 tx_pause)
{
	struct mwifiex_ra_list_tbl *ra_list;
	u32 pkt_cnt = 0, tx_pkts_queued;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
				    list) {
			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
				continue;

			if (ra_list->tx_paused != tx_pause) {
				pkt_cnt += ra_list->total_pkt_count;
				ra_list->tx_paused = tx_pause;
				if (tx_pause)
					priv->wmm.pkts_paused[i] +=
						ra_list->total_pkt_count;
				else
					priv->wmm.pkts_paused[i] -=
						ra_list->total_pkt_count;
			}
		}
	}

	if (pkt_cnt) {
		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
		if (tx_pause)
			tx_pkts_queued -= pkt_cnt;
		else
			tx_pkts_queued += pkt_cnt;

		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;

	mwifiex_ralist_add(priv, ra_addr);
	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}

/*
 * This function deletes the RA list nodes for the given MAC address
 * across all TIDs. It also decrements the Tx pending count accordingly.
 */
void
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);

		if (!ra_list)
			continue;
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
		if (ra_list->tx_paused)
			priv->wmm.pkts_paused[i] -= ra_list->total_pkt_count;
		else
			atomic_sub(ra_list->total_pkt_count,
				   &priv->wmm.tx_pkts_queued);
		list_del(&ra_list->list);
		kfree(ra_list);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}

/*
 * This function adds a packet to the bypass Tx queue.
 *
 * This is a special Tx queue for packets that can be sent even when
 * port_open is false.
 */
void
mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
				   struct sk_buff *skb)
{
	skb_queue_tail(&priv->bypass_txq, skb);
}

/*
 * This function adds a packet to the WMM queue.
 *
 * In the disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with status failure.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	struct list_head list_head;
	int tdls_status = TDLS_NOT_SETUP;
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
			mwifiex_dbg(adapter, DATA,
				    "TDLS setup packet for %pM.\t"
				    "Don't block\n", ra);
		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
	}

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	tid = skb->priority;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In infrastructure mode the RA list was already created during
	 * association, and there is only one RA pointer per TID, so
	 * get_queue_raptr need not be called.
	 */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		switch (tdls_status) {
		case TDLS_SETUP_COMPLETE:
		case TDLS_CHAN_SWITCHING:
		case TDLS_IN_BASE_CHAN:
		case TDLS_IN_OFF_CHAN:
			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
							      ra);
			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
			break;
		case TDLS_SETUP_INPROGRESS:
			skb_queue_tail(&priv->tdls_txq, skb);
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			return;
		default:
			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
			ra_list = list_first_entry_or_null(&list_head,
					struct mwifiex_ra_list_tbl, list);
			break;
		}
	} else {
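		/* multicast destinations and mgmt frames are queued on the
		 * broadcast RA list
		 */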
		memcpy(ra, skb->data, ETH_ALEN);
		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
			eth_broadcast_addr(ra);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
						priv->tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   priv->tos_to_tid_inv[tid_down]);

	if (ra_list->tx_paused)
		priv->wmm.pkts_paused[tid_down]++;
	else
		atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function processes the get WMM status command response from firmware.
 *
 * The response may contain multiple TLVs -
 *	- AC Queue status TLVs
 *	- Current WMM Parameter IE TLV
 *	- Admission Control action frame TLVs
 *
 * This function parses the TLVs and then calls further specific functions
 * to process any changes in the queue priorities or state.
 */
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
	bool valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		    resp_len);

	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
			break;

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "QSTATUS TLV: %d, %d, %d\n",
				    tlv_wmm_qstatus->queue_index,
				    tlv_wmm_qstatus->flow_required,
				    tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
						tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE 2 bytes into the Marvell IE
			 * and setup the IEEE IE type and length byte fields
			 */
			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr + 2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
						WLAN_EID_VENDOR_SPECIFIC;

			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "WMM Parameter Set Count: %d\n",
				    wmm_param_ie->qos_info_bitmap & mask);

			if (wmm_param_ie->vend_hdr.len + 2 >
				sizeof(struct ieee_types_wmm_parameter))
				break;

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);
			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}

/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		    wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}

/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/Driver interface, the current
 * time is set in the packet structure. The difference between the present
 * time and that received time is computed in this function and limited
 * based on pre-compiled limits in the driver.
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
	u8 ret_val;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
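	/* e.g. a measured delay of 100 ms is reported to firmware as
	 * 100 >> 1 = 50, provided drv_pkt_delay_max is at least 100
	 */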
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
		    "%d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}

/*
 * This function retrieves the highest priority RA list table pointer.
 *
 * It walks the BSS priority table from the highest priority downwards
 * and, within each BSS, scans the TIDs from highest_queued_prio down,
 * returning the first RA list that is not Tx-paused and has packets
 * queued.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		/* iterate over BSS with the equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {
try_again:
			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
			     !priv_tmp->port_open) ||
			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
				continue;

			if (adapter->if_ops.is_port_ready &&
			    !adapter->if_ops.is_port_ready(priv_tmp))
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!ptr->tx_paused &&
					    !skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
			}

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
				atomic_set(&priv_tmp->wmm.highest_queued_prio,
					   HIGH_PRIO_TID);
				/* Iterate current private once more, since
				 * there still exist packets in data queue
				 */
				goto try_again;
			} else
				atomic_set(&priv_tmp->wmm.highest_queued_prio,
					   NO_PKT_PRIO_TID);
		}
	}

	return NULL;

found:
	/* holds ra_list_spinlock */
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}

/* This function rotates the RA and BSS lists so packets are picked
 * round robin.
 *
 * After a packet is successfully transmitted, rotate the RA list, so the
 * RA next to the one just served comes first in the list. The same
 * applies to BSS nodes of equal priority.
 *
 * The function also increments the wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];

	spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * curr bss node. imagine list to stay fixed while head is moved
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);

	spin_lock_bh(&priv->wmm.ra_list_spinlock);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function checks if 11n aggregation is possible.
 */
static int
mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;
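
	/* aggregation pays off only if at least MIN_NUM_AMSDU packets fit
	 * within the A-MSDU size limit
	 */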
	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}

/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	mwifiex_dbg(adapter, DATA,
		    "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Queue the packet back onto the RA list */
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;

		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}

/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index)
			      __releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);
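
	/* if the interface cannot take data right now, park the packet on
	 * the adapter-wide tx_data_q instead of sending it
	 */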
	if (adapter->data_sent || adapter->tx_lock_flag) {
		ptr->total_pkt_count--;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		skb_queue_tail(&adapter->tx_data_q, skb);
		atomic_dec(&priv->wmm.tx_pkts_queued);
		atomic_inc(&adapter->tx_queued);
		return;
	}

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	tx_param.next_pkt_len =
		((skb_next) ? skb_next->len +
		 sizeof(struct txpd) : 0);
	if (adapter->iface_type == MWIFIEX_USB) {
		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
						   skb, &tx_param);
	} else {
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		break;
	case -1:
		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
		spin_lock_bh(&priv->wmm.ra_list_spinlock);
		ptr->total_pkt_count--;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
	}
}

/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);

	spin_lock_bh(&priv->wmm.ra_list_spinlock);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index);
		/* ra_list_spinlock has been freed in
		   mwifiex_send_processed_packet() */
		return 0;
	}
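
	/* If 11n is off, a BA stream already exists, or a WPS session is
	 * active, the packet goes out directly (possibly as part of an
	 * A-MSDU inside an existing AMPDU). Otherwise a BA stream may
	 * first be set up once enough packets have been seen on this
	 * RA list.
	 */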
	if (!ptr->is_11n_enabled ||
	    ptr->ba_status ||
	    priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
		    ptr->ba_status &&
		    ptr->amsdu_in_ampdu &&
		    mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			   mwifiex_11n_aggregate_pkt() */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			   mwifiex_send_single_packet() */
	}

	return 0;
}

void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
{
	struct mwifiex_tx_param tx_param;
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_private *priv;
	int i;

	if (adapter->data_sent || adapter->tx_lock_flag)
		return;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];

		if (!priv)
			continue;

		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;

		if (skb_queue_empty(&priv->bypass_txq))
			continue;

		skb = skb_dequeue(&priv->bypass_txq);
		tx_info = MWIFIEX_SKB_TXCB(skb);

		/* no aggregation for bypass packets */
		tx_param.next_pkt_len = 0;

		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
			skb_queue_head(&priv->bypass_txq, skb);
			tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		} else {
			atomic_dec(&adapter->bypass_tx_pending);
		}
	}
}

/*
 * This function transmits the highest priority packet awaiting in the
 * WMM Queues.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
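	/* Keep draining the WMM queues until they are empty or the
	 * interface cannot accept more data: non-SDIO interfaces stop
	 * while a send is pending, SDIO stops once MWIFIEX_MAX_PKTS_TXQ
	 * packets are queued.
	 */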
	do {
		if (mwifiex_dequeue_tx_packet(adapter))
			break;
		if (adapter->iface_type != MWIFIEX_SDIO) {
			if (adapter->data_sent ||
			    adapter->tx_lock_flag)
				break;
		} else {
			if (atomic_read(&adapter->tx_queued) >=
			    MWIFIEX_MAX_PKTS_TXQ)
				break;
		}
	} while (!mwifiex_wmm_lists_empty(adapter));
}