txrx.c

  1. // SPDX-License-Identifier: ISC
  2. /*
  3. * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  4. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #include <linux/etherdevice.h>
  7. #include <net/ieee80211_radiotap.h>
  8. #include <linux/if_arp.h>
  9. #include <linux/moduleparam.h>
  10. #include <linux/ip.h>
  11. #include <linux/ipv6.h>
  12. #include <linux/if_vlan.h>
  13. #include <net/ipv6.h>
  14. #include <linux/prefetch.h>
  15. #include "wil6210.h"
  16. #include "wmi.h"
  17. #include "txrx.h"
  18. #include "trace.h"
  19. #include "txrx_edma.h"
  20. bool rx_align_2;
  21. module_param(rx_align_2, bool, 0444);
  22. MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
  23. bool rx_large_buf;
  24. module_param(rx_large_buf, bool, 0444);
  25. MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
  26. /* Drop Tx packets in case Tx ring is full */
  27. bool drop_if_ring_full;
  28. static inline uint wil_rx_snaplen(void)
  29. {
  30. return rx_align_2 ? 6 : 0;
  31. }
  32. /* wil_ring_wmark_low - low watermark for available descriptor space */
  33. static inline int wil_ring_wmark_low(struct wil_ring *ring)
  34. {
  35. return ring->size / 8;
  36. }
  37. /* wil_ring_wmark_high - high watermark for available descriptor space */
  38. static inline int wil_ring_wmark_high(struct wil_ring *ring)
  39. {
  40. return ring->size / 4;
  41. }
  42. /* returns true if num avail descriptors is lower than wmark_low */
  43. static inline int wil_ring_avail_low(struct wil_ring *ring)
  44. {
  45. return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
  46. }
  47. /* returns true if num avail descriptors is higher than wmark_high */
  48. static inline int wil_ring_avail_high(struct wil_ring *ring)
  49. {
  50. return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
  51. }
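/* Illustrative note: the two watermarks above provide hysteresis for Tx
 * flow control. For a hypothetical ring of size 1024, wil_ring_wmark_low()
 * is 128 and wil_ring_wmark_high() is 256, so the net queues can be stopped
 * once fewer than size/8 descriptors remain free and woken again only when
 * more than size/4 are free, avoiding rapid stop/wake toggling.
 */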
  52. /* returns true when all tx vrings are empty */
  53. bool wil_is_tx_idle(struct wil6210_priv *wil)
  54. {
  55. int i;
  56. unsigned long data_comp_to;
  57. int min_ring_id = wil_get_min_tx_ring_id(wil);
  58. for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
  59. struct wil_ring *vring = &wil->ring_tx[i];
  60. int vring_index = vring - wil->ring_tx;
  61. struct wil_ring_tx_data *txdata =
  62. &wil->ring_tx_data[vring_index];
  63. spin_lock(&txdata->lock);
  64. if (!vring->va || !txdata->enabled) {
  65. spin_unlock(&txdata->lock);
  66. continue;
  67. }
  68. data_comp_to = jiffies + msecs_to_jiffies(
  69. WIL_DATA_COMPLETION_TO_MS);
  70. if (test_bit(wil_status_napi_en, wil->status)) {
  71. while (!wil_ring_is_empty(vring)) {
  72. if (time_after(jiffies, data_comp_to)) {
  73. wil_dbg_pm(wil,
  74. "TO waiting for idle tx\n");
  75. spin_unlock(&txdata->lock);
  76. return false;
  77. }
  78. wil_dbg_ratelimited(wil,
  79. "tx vring is not empty -> NAPI\n");
  80. spin_unlock(&txdata->lock);
  81. napi_synchronize(&wil->napi_tx);
  82. msleep(20);
  83. spin_lock(&txdata->lock);
  84. if (!vring->va || !txdata->enabled)
  85. break;
  86. }
  87. }
  88. spin_unlock(&txdata->lock);
  89. }
  90. return true;
  91. }
  92. static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
  93. {
  94. struct device *dev = wil_to_dev(wil);
  95. size_t sz = vring->size * sizeof(vring->va[0]);
  96. uint i;
  97. wil_dbg_misc(wil, "vring_alloc:\n");
  98. BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
  99. vring->swhead = 0;
  100. vring->swtail = 0;
  101. vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
  102. if (!vring->ctx) {
  103. vring->va = NULL;
  104. return -ENOMEM;
  105. }
  106. /* vring->va should be aligned on its size rounded up to power of 2.
  107. * This is guaranteed by dma_alloc_coherent.
  108. *
  109. * The HW has a limitation that all vring addresses must share the same
  110. * upper 16 bits of the 48-bit address. To work around that, if we are
  111. * using more than 32-bit addresses, switch to a 32-bit mask before
  112. * allocating the vring memory.
  113. *
  114. * There is no check of the return value of dma_set_mask_and_coherent,
  115. * since we assume that if we were able to set the mask during
  116. * initialization on this system, it will not fail if we set it again.
  117. */
  118. if (wil->dma_addr_size > 32)
  119. dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
  120. vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
  121. if (!vring->va) {
  122. kfree(vring->ctx);
  123. vring->ctx = NULL;
  124. return -ENOMEM;
  125. }
  126. if (wil->dma_addr_size > 32)
  127. dma_set_mask_and_coherent(dev,
  128. DMA_BIT_MASK(wil->dma_addr_size));
  129. /* initially, all descriptors are SW owned
  130. * For Tx and Rx, ownership bit is at the same location, thus
  131. * we can use any
  132. */
  133. for (i = 0; i < vring->size; i++) {
  134. volatile struct vring_tx_desc *_d =
  135. &vring->va[i].tx.legacy;
  136. _d->dma.status = TX_DMA_STATUS_DU;
  137. }
  138. wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
  139. vring->va, &vring->pa, vring->ctx);
  140. return 0;
  141. }
  142. static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
  143. struct wil_ctx *ctx)
  144. {
  145. struct vring_tx_desc *d = &desc->legacy;
  146. dma_addr_t pa = wil_desc_addr(&d->dma.addr);
  147. u16 dmalen = le16_to_cpu(d->dma.length);
  148. switch (ctx->mapped_as) {
  149. case wil_mapped_as_single:
  150. dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
  151. break;
  152. case wil_mapped_as_page:
  153. dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
  154. break;
  155. default:
  156. break;
  157. }
  158. }
  159. static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
  160. {
  161. struct device *dev = wil_to_dev(wil);
  162. size_t sz = vring->size * sizeof(vring->va[0]);
  163. lockdep_assert_held(&wil->mutex);
  164. if (!vring->is_rx) {
  165. int vring_index = vring - wil->ring_tx;
  166. wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
  167. vring_index, vring->size, vring->va,
  168. &vring->pa, vring->ctx);
  169. } else {
  170. wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
  171. vring->size, vring->va,
  172. &vring->pa, vring->ctx);
  173. }
  174. while (!wil_ring_is_empty(vring)) {
  175. dma_addr_t pa;
  176. u16 dmalen;
  177. struct wil_ctx *ctx;
  178. if (!vring->is_rx) {
  179. struct vring_tx_desc dd, *d = &dd;
  180. volatile struct vring_tx_desc *_d =
  181. &vring->va[vring->swtail].tx.legacy;
  182. ctx = &vring->ctx[vring->swtail];
  183. if (!ctx) {
  184. wil_dbg_txrx(wil,
  185. "ctx(%d) was already completed\n",
  186. vring->swtail);
  187. vring->swtail = wil_ring_next_tail(vring);
  188. continue;
  189. }
  190. *d = *_d;
  191. wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
  192. if (ctx->skb)
  193. dev_kfree_skb_any(ctx->skb);
  194. vring->swtail = wil_ring_next_tail(vring);
  195. } else { /* rx */
  196. struct vring_rx_desc dd, *d = &dd;
  197. volatile struct vring_rx_desc *_d =
  198. &vring->va[vring->swhead].rx.legacy;
  199. ctx = &vring->ctx[vring->swhead];
  200. *d = *_d;
  201. pa = wil_desc_addr(&d->dma.addr);
  202. dmalen = le16_to_cpu(d->dma.length);
  203. dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
  204. kfree_skb(ctx->skb);
  205. wil_ring_advance_head(vring, 1);
  206. }
  207. }
  208. dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
  209. kfree(vring->ctx);
  210. vring->pa = 0;
  211. vring->va = NULL;
  212. vring->ctx = NULL;
  213. }
  214. /* Allocate one skb for Rx VRING
  215. *
  216. * Safe to call from IRQ
  217. */
  218. static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
  219. u32 i, int headroom)
  220. {
  221. struct device *dev = wil_to_dev(wil);
  222. unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
  223. struct vring_rx_desc dd, *d = &dd;
  224. volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
  225. dma_addr_t pa;
  226. struct sk_buff *skb = dev_alloc_skb(sz + headroom);
  227. if (unlikely(!skb))
  228. return -ENOMEM;
  229. skb_reserve(skb, headroom);
  230. skb_put(skb, sz);
  231. /**
  232. * Make sure that the network stack calculates checksum for packets
  233. * which failed the HW checksum calculation
  234. */
  235. skb->ip_summed = CHECKSUM_NONE;
  236. pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
  237. if (unlikely(dma_mapping_error(dev, pa))) {
  238. kfree_skb(skb);
  239. return -ENOMEM;
  240. }
  241. d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
  242. wil_desc_addr_set(&d->dma.addr, pa);
  243. /* ip_length don't care */
  244. /* b11 don't care */
  245. /* error don't care */
  246. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  247. d->dma.length = cpu_to_le16(sz);
  248. *_d = *d;
  249. vring->ctx[i].skb = skb;
  250. return 0;
  251. }
  252. /* Adds radiotap header
  253. *
  254. * Any error indicated as "Bad FCS"
  255. *
  256. * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
  257. * - Rx descriptor: 32 bytes
  258. * - Phy info
  259. */
  260. static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
  261. struct sk_buff *skb)
  262. {
  263. struct wil6210_rtap {
  264. struct ieee80211_radiotap_header rthdr;
  265. /* fields should be in the order of bits in rthdr.it_present */
  266. /* flags */
  267. u8 flags;
  268. /* channel */
  269. __le16 chnl_freq __aligned(2);
  270. __le16 chnl_flags;
  271. /* MCS */
  272. u8 mcs_present;
  273. u8 mcs_flags;
  274. u8 mcs_index;
  275. } __packed;
  276. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  277. struct wil6210_rtap *rtap;
  278. int rtap_len = sizeof(struct wil6210_rtap);
  279. struct ieee80211_channel *ch = wil->monitor_chandef.chan;
  280. if (skb_headroom(skb) < rtap_len &&
  281. pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
  282. wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
  283. return;
  284. }
  285. rtap = skb_push(skb, rtap_len);
  286. memset(rtap, 0, rtap_len);
  287. rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
  288. rtap->rthdr.it_len = cpu_to_le16(rtap_len);
  289. rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
  290. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  291. (1 << IEEE80211_RADIOTAP_MCS));
  292. if (d->dma.status & RX_DMA_STATUS_ERROR)
  293. rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;
  294. rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
  295. rtap->chnl_flags = cpu_to_le16(0);
  296. rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
  297. rtap->mcs_flags = 0;
  298. rtap->mcs_index = wil_rxdesc_mcs(d);
  299. }
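/* Note on the fallback above: when no monitor channel is configured,
 * chnl_freq is reported as 58320 MHz, the centre frequency of 60 GHz (DMG)
 * channel 1. Any descriptor error is exposed to sniffers as the radiotap
 * "bad FCS" flag, as stated in the header comment of this function.
 */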
  300. static bool wil_is_rx_idle(struct wil6210_priv *wil)
  301. {
  302. struct vring_rx_desc *_d;
  303. struct wil_ring *ring = &wil->ring_rx;
  304. _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
  305. if (_d->dma.status & RX_DMA_STATUS_DU)
  306. return false;
  307. return true;
  308. }
  309. static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
  310. {
  311. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  312. int mid = wil_rxdesc_mid(d);
  313. struct wil6210_vif *vif = wil->vifs[mid];
  314. /* cid from DMA descriptor is limited to 3 bits.
  315. * In case of cid>=8, the value would be cid modulo 8 and we need to
  316. * find real cid by locating the transmitter (ta) inside sta array
  317. */
  318. int cid = wil_rxdesc_cid(d);
  319. unsigned int snaplen = wil_rx_snaplen();
  320. struct ieee80211_hdr_3addr *hdr;
  321. int i;
  322. unsigned char *ta;
  323. u8 ftype;
  324. /* in monitor mode there are no connections */
  325. if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
  326. return cid;
  327. ftype = wil_rxdesc_ftype(d) << 2;
  328. if (likely(ftype == IEEE80211_FTYPE_DATA)) {
  329. if (unlikely(skb->len < ETH_HLEN + snaplen)) {
  330. wil_err_ratelimited(wil,
  331. "Short data frame, len = %d\n",
  332. skb->len);
  333. return -ENOENT;
  334. }
  335. ta = wil_skb_get_sa(skb);
  336. } else {
  337. if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
  338. wil_err_ratelimited(wil, "Short frame, len = %d\n",
  339. skb->len);
  340. return -ENOENT;
  341. }
  342. hdr = (void *)skb->data;
  343. ta = hdr->addr2;
  344. }
  345. if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
  346. return cid;
  347. /* assuming no concurrency between AP interfaces and STA interfaces.
  348. * multista is used only in P2P_GO or AP mode. In other modes return
  349. * cid from the rx descriptor
  350. */
  351. if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
  352. vif->wdev.iftype != NL80211_IFTYPE_AP)
  353. return cid;
  354. /* For Rx packets cid from rx descriptor is limited to 3 bits (0..7),
  355. * to find the real cid, compare transmitter address with the stored
  356. * stations mac address in the driver sta array
  357. */
  358. for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
  359. if (wil->sta[i].status != wil_sta_unused &&
  360. ether_addr_equal(wil->sta[i].addr, ta)) {
  361. cid = i;
  362. break;
  363. }
  364. }
  365. if (i >= wil->max_assoc_sta) {
  366. wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
  367. ta, vif->wdev.iftype, ftype, skb->len);
  368. cid = -ENOENT;
  369. }
  370. return cid;
  371. }
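/* Worked example of the cid recovery above: the Rx descriptor carries only
 * 3 cid bits (0..7). With, say, max_assoc_sta = 20 and a descriptor cid of
 * 2, the loop probes sta[2], sta[10] and sta[18] and selects the entry
 * whose stored MAC address matches the transmitter address (ta) of the
 * frame; if none matches, -ENOENT is returned.
 */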
  372. /* reap 1 frame from @swhead
  373. *
  374. * Rx descriptor copied to skb->cb
  375. *
  376. * Safe to call from IRQ
  377. */
  378. static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
  379. struct wil_ring *vring)
  380. {
  381. struct device *dev = wil_to_dev(wil);
  382. struct wil6210_vif *vif;
  383. struct net_device *ndev;
  384. volatile struct vring_rx_desc *_d;
  385. struct vring_rx_desc *d;
  386. struct sk_buff *skb;
  387. dma_addr_t pa;
  388. unsigned int snaplen = wil_rx_snaplen();
  389. unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
  390. u16 dmalen;
  391. u8 ftype;
  392. int cid, mid;
  393. int i;
  394. struct wil_net_stats *stats;
  395. BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
  396. again:
  397. if (unlikely(wil_ring_is_empty(vring)))
  398. return NULL;
  399. i = (int)vring->swhead;
  400. _d = &vring->va[i].rx.legacy;
  401. if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
  402. /* it is not error, we just reached end of Rx done area */
  403. return NULL;
  404. }
  405. skb = vring->ctx[i].skb;
  406. vring->ctx[i].skb = NULL;
  407. wil_ring_advance_head(vring, 1);
  408. if (!skb) {
  409. wil_err(wil, "No Rx skb at [%d]\n", i);
  410. goto again;
  411. }
  412. d = wil_skb_rxdesc(skb);
  413. *d = *_d;
  414. pa = wil_desc_addr(&d->dma.addr);
  415. dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
  416. dmalen = le16_to_cpu(d->dma.length);
  417. trace_wil6210_rx(i, d);
  418. wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
  419. wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
  420. (const void *)d, sizeof(*d), false);
  421. mid = wil_rxdesc_mid(d);
  422. vif = wil->vifs[mid];
  423. if (unlikely(!vif)) {
  424. wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
  425. mid);
  426. kfree_skb(skb);
  427. goto again;
  428. }
  429. ndev = vif_to_ndev(vif);
  430. if (unlikely(dmalen > sz)) {
  431. wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
  432. dmalen);
  433. kfree_skb(skb);
  434. goto again;
  435. }
  436. skb_trim(skb, dmalen);
  437. prefetch(skb->data);
  438. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  439. skb->data, skb_headlen(skb), false);
  440. cid = wil_rx_get_cid_by_skb(wil, skb);
  441. if (cid == -ENOENT) {
  442. kfree_skb(skb);
  443. goto again;
  444. }
  445. wil_skb_set_cid(skb, (u8)cid);
  446. stats = &wil->sta[cid].stats;
  447. stats->last_mcs_rx = wil_rxdesc_mcs(d);
  448. if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
  449. stats->rx_per_mcs[stats->last_mcs_rx]++;
  450. /* use radiotap header only if required */
  451. if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
  452. wil_rx_add_radiotap_header(wil, skb);
  453. /* no extra checks if in sniffer mode */
  454. if (ndev->type != ARPHRD_ETHER)
  455. return skb;
  456. /* Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
  457. * The driver should recognize them by the frame type found in the Rx
  458. * descriptor. If the type is not data, it is an 802.11 frame as is.
  459. */
  460. ftype = wil_rxdesc_ftype(d) << 2;
  461. if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
  462. u8 fc1 = wil_rxdesc_fc1(d);
  463. int tid = wil_rxdesc_tid(d);
  464. u16 seq = wil_rxdesc_seq(d);
  465. wil_dbg_txrx(wil,
  466. "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
  467. fc1, mid, cid, tid, seq);
  468. stats->rx_non_data_frame++;
  469. if (wil_is_back_req(fc1)) {
  470. wil_dbg_txrx(wil,
  471. "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
  472. mid, cid, tid, seq);
  473. wil_rx_bar(wil, vif, cid, tid, seq);
  474. } else {
  475. /* print again all info. One can enable only this
  476. * without overhead for printing every Rx frame
  477. */
  478. wil_dbg_txrx(wil,
  479. "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
  480. fc1, mid, cid, tid, seq);
  481. wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
  482. (const void *)d, sizeof(*d), false);
  483. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  484. skb->data, skb_headlen(skb), false);
  485. }
  486. kfree_skb(skb);
  487. goto again;
  488. }
  489. /* L4 IDENT is on when HW calculated checksum, check status
  490. * and in case of error drop the packet
  491. * higher stack layers will handle retransmission (if required)
  492. */
  493. if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
  494. /* L4 protocol identified, csum calculated */
  495. if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
  496. skb->ip_summed = CHECKSUM_UNNECESSARY;
  497. /* If HW reports a bad checksum, let the IP stack re-check it.
  498. * For example, HW doesn't understand the Microsoft IP stack, which
  499. * mis-calculates the TCP checksum - if it should be 0x0,
  500. * it writes 0xffff, in violation of RFC 1624.
  501. */
  502. else
  503. stats->rx_csum_err++;
  504. }
  505. if (snaplen) {
  506. /* Packet layout
  507. * +-------+-------+---------+------------+------+
  508. * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
  509. * +-------+-------+---------+------------+------+
  510. * Need to remove SNAP, shifting SA and DA forward
  511. */
  512. memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
  513. skb_pull(skb, snaplen);
  514. }
  515. return skb;
  516. }
  517. /* allocate and fill up to @count buffers in rx ring
  518. * buffers posted at @swtail
  519. * Note: we have a single RX queue for servicing all VIFs, but we
  520. * allocate skbs with headroom according to main interface only. This
  521. * means it will not work with monitor interface together with other VIFs.
  522. * Currently we only support monitor interface on its own without other VIFs,
  523. * and we will need to fix this code once we add support.
  524. */
  525. static int wil_rx_refill(struct wil6210_priv *wil, int count)
  526. {
  527. struct net_device *ndev = wil->main_ndev;
  528. struct wil_ring *v = &wil->ring_rx;
  529. u32 next_tail;
  530. int rc = 0;
  531. int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
  532. WIL6210_RTAP_SIZE : 0;
  533. for (; next_tail = wil_ring_next_tail(v),
  534. (next_tail != v->swhead) && (count-- > 0);
  535. v->swtail = next_tail) {
  536. rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
  537. if (unlikely(rc)) {
  538. wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
  539. rc, v->swtail);
  540. break;
  541. }
  542. }
  543. /* make sure all writes to descriptors (shared memory) are done before
  544. * committing them to HW
  545. */
  546. wmb();
  547. wil_w(wil, v->hwtail, v->swtail);
  548. return rc;
  549. }
  550. /**
  551. * reverse_memcmp - Compare two areas of memory, in reverse order
  552. * @cs: One area of memory
  553. * @ct: Another area of memory
  554. * @count: The size of the area.
  555. *
  556. * Cut'n'paste from original memcmp (see lib/string.c)
  557. * with minimal modifications
  558. */
  559. int reverse_memcmp(const void *cs, const void *ct, size_t count)
  560. {
  561. const unsigned char *su1, *su2;
  562. int res = 0;
  563. for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
  564. --su1, --su2, count--) {
  565. res = *su1 - *su2;
  566. if (res)
  567. break;
  568. }
  569. return res;
  570. }
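/* Note: unlike memcmp(), the loop above starts at byte count-1 and walks
 * backwards, so the byte at the highest offset is treated as the most
 * significant. For example, with 6-byte values {0x01,0,0,0,0,0x02} and
 * {0xff,0xff,0,0,0,0x01}, the bytes at offset 5 differ first (0x02 > 0x01)
 * and the result is positive. wil_rx_crypto_check() below relies on this
 * ordering to reject any PN that is not strictly greater than the last
 * accepted one.
 */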
  571. static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
  572. {
  573. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  574. int cid = wil_skb_get_cid(skb);
  575. int tid = wil_rxdesc_tid(d);
  576. int key_id = wil_rxdesc_key_id(d);
  577. int mc = wil_rxdesc_mcast(d);
  578. struct wil_sta_info *s = &wil->sta[cid];
  579. struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
  580. &s->tid_crypto_rx[tid];
  581. struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
  582. const u8 *pn = (u8 *)&d->mac.pn;
  583. if (!cc->key_set) {
  584. wil_err_ratelimited(wil,
  585. "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
  586. cid, tid, mc, key_id);
  587. return -EINVAL;
  588. }
  589. if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
  590. wil_err_ratelimited(wil,
  591. "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
  592. cid, tid, mc, key_id, pn, cc->pn);
  593. return -EINVAL;
  594. }
  595. memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
  596. return 0;
  597. }
  598. static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
  599. struct wil_net_stats *stats)
  600. {
  601. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  602. if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
  603. (d->dma.error & RX_DMA_ERROR_MIC)) {
  604. stats->rx_mic_error++;
  605. wil_dbg_txrx(wil, "MIC error, dropping packet\n");
  606. return -EFAULT;
  607. }
  608. return 0;
  609. }
  610. static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
  611. int *security)
  612. {
  613. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  614. *cid = wil_skb_get_cid(skb);
  615. *security = wil_rxdesc_security(d);
  616. }
  617. /*
  618. * Check if skb is ptk eapol key message
  619. *
  620. * returns a pointer to the start of the eapol key structure, NULL
  621. * if frame is not PTK eapol key
  622. */
  623. static struct wil_eapol_key *wil_is_ptk_eapol_key(struct wil6210_priv *wil,
  624. struct sk_buff *skb)
  625. {
  626. u8 *buf;
  627. const struct wil_1x_hdr *hdr;
  628. struct wil_eapol_key *key;
  629. u16 key_info;
  630. int len = skb->len;
  631. if (!skb_mac_header_was_set(skb)) {
  632. wil_err(wil, "mac header was not set\n");
  633. return NULL;
  634. }
  635. len -= skb_mac_offset(skb);
  636. if (len < sizeof(struct ethhdr) + sizeof(struct wil_1x_hdr) +
  637. sizeof(struct wil_eapol_key))
  638. return NULL;
  639. buf = skb_mac_header(skb) + sizeof(struct ethhdr);
  640. hdr = (const struct wil_1x_hdr *)buf;
  641. if (hdr->type != WIL_1X_TYPE_EAPOL_KEY)
  642. return NULL;
  643. key = (struct wil_eapol_key *)(buf + sizeof(struct wil_1x_hdr));
  644. if (key->type != WIL_EAPOL_KEY_TYPE_WPA &&
  645. key->type != WIL_EAPOL_KEY_TYPE_RSN)
  646. return NULL;
  647. key_info = be16_to_cpu(key->key_info);
  648. if (!(key_info & WIL_KEY_INFO_KEY_TYPE)) /* check if pairwise */
  649. return NULL;
  650. return key;
  651. }
  652. static bool wil_skb_is_eap_3(struct wil6210_priv *wil, struct sk_buff *skb)
  653. {
  654. struct wil_eapol_key *key;
  655. u16 key_info;
  656. key = wil_is_ptk_eapol_key(wil, skb);
  657. if (!key)
  658. return false;
  659. key_info = be16_to_cpu(key->key_info);
  660. if (key_info & (WIL_KEY_INFO_MIC |
  661. WIL_KEY_INFO_ENCR_KEY_DATA)) {
  662. /* 3/4 of 4-Way Handshake */
  663. wil_dbg_misc(wil, "EAPOL key message 3\n");
  664. return true;
  665. }
  666. /* 1/4 of 4-Way Handshake */
  667. wil_dbg_misc(wil, "EAPOL key message 1\n");
  668. return false;
  669. }
  670. static bool wil_skb_is_eap_4(struct wil6210_priv *wil, struct sk_buff *skb)
  671. {
  672. struct wil_eapol_key *key;
  673. u32 *nonce, i;
  674. key = wil_is_ptk_eapol_key(wil, skb);
  675. if (!key)
  676. return false;
  677. nonce = (u32 *)key->key_nonce;
  678. for (i = 0; i < WIL_EAP_NONCE_LEN / sizeof(u32); i++, nonce++) {
  679. if (*nonce != 0) {
  680. /* message 2/4 */
  681. wil_dbg_misc(wil, "EAPOL key message 2\n");
  682. return false;
  683. }
  684. }
  685. wil_dbg_misc(wil, "EAPOL key message 4\n");
  686. return true;
  687. }
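/* Summary of the heuristic used by wil_skb_is_eap_3/_4 above: for EAPOL-Key
 * frames received from the AP, message 3/4 is told apart from 1/4 by the
 * MIC and Encrypted-Key-Data bits in key_info; for frames sent by the
 * station, message 4/4 is told apart from 2/4 by its all-zero key nonce
 * (2/4 carries the SNonce). Only pairwise (PTK) EAPOL-Key frames get this
 * far, since wil_is_ptk_eapol_key() filters on WIL_KEY_INFO_KEY_TYPE.
 */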
  688. void wil_enable_tx_key_worker(struct work_struct *work)
  689. {
  690. struct wil6210_vif *vif = container_of(work,
  691. struct wil6210_vif, enable_tx_key_worker);
  692. struct wil6210_priv *wil = vif_to_wil(vif);
  693. int rc, cid;
  694. rtnl_lock();
  695. if (vif->ptk_rekey_state != WIL_REKEY_WAIT_M4_SENT) {
  696. wil_dbg_misc(wil, "Invalid rekey state = %d\n",
  697. vif->ptk_rekey_state);
  698. rtnl_unlock();
  699. return;
  700. }
  701. cid = wil_find_cid_by_idx(wil, vif->mid, 0);
  702. if (!wil_cid_valid(wil, cid)) {
  703. wil_err(wil, "Invalid cid = %d\n", cid);
  704. rtnl_unlock();
  705. return;
  706. }
  707. wil_dbg_misc(wil, "Apply PTK key after eapol was sent out\n");
  708. rc = wmi_add_cipher_key(vif, 0, wil->sta[cid].addr, 0, NULL,
  709. WMI_KEY_USE_APPLY_PTK);
  710. vif->ptk_rekey_state = WIL_REKEY_IDLE;
  711. rtnl_unlock();
  712. if (rc)
  713. wil_err(wil, "Apply PTK key failed %d\n", rc);
  714. }
  715. void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
  716. {
  717. struct wil6210_priv *wil = vif_to_wil(vif);
  718. struct wireless_dev *wdev = vif_to_wdev(vif);
  719. bool q = false;
  720. if (wdev->iftype != NL80211_IFTYPE_STATION ||
  721. !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
  722. return;
  723. /* check if skb is an EAP message 4/4 */
  724. if (!wil_skb_is_eap_4(wil, skb))
  725. return;
  726. spin_lock_bh(&wil->eap_lock);
  727. switch (vif->ptk_rekey_state) {
  728. case WIL_REKEY_IDLE:
  729. /* ignore idle state, can happen due to M4 retransmission */
  730. break;
  731. case WIL_REKEY_M3_RECEIVED:
  732. vif->ptk_rekey_state = WIL_REKEY_IDLE;
  733. break;
  734. case WIL_REKEY_WAIT_M4_SENT:
  735. q = true;
  736. break;
  737. default:
  738. wil_err(wil, "Unknown rekey state = %d",
  739. vif->ptk_rekey_state);
  740. }
  741. spin_unlock_bh(&wil->eap_lock);
  742. if (q) {
  743. q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker);
  744. wil_dbg_misc(wil, "queue_work of enable_tx_key_worker -> %d\n",
  745. q);
  746. }
  747. }
  748. static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
  749. {
  750. struct wil6210_priv *wil = vif_to_wil(vif);
  751. struct wireless_dev *wdev = vif_to_wdev(vif);
  752. if (wdev->iftype != NL80211_IFTYPE_STATION ||
  753. !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
  754. return;
  755. /* check if skb is an EAP message 3/4 */
  756. if (!wil_skb_is_eap_3(wil, skb))
  757. return;
  758. if (vif->ptk_rekey_state == WIL_REKEY_IDLE)
  759. vif->ptk_rekey_state = WIL_REKEY_M3_RECEIVED;
  760. }
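/* Split-rekey flow as implemented above (STA mode with
 * WMI_FW_CAPABILITY_SPLIT_REKEY): receiving EAPOL message 3/4 moves
 * ptk_rekey_state from IDLE to M3_RECEIVED; when transmission of message
 * 4/4 completes while the state is WAIT_M4_SENT (set by the key-install
 * path, not shown in this file), enable_tx_key_worker is queued and applies
 * the pending PTK via WMI_KEY_USE_APPLY_PTK, so the new key takes effect
 * only after message 4 has actually gone out on the air.
 */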
  761. /*
  762. * Pass Rx packet to the netif. Update statistics.
  763. * Called in softirq context (NAPI poll).
  764. */
  765. void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
  766. struct wil_net_stats *stats, bool gro)
  767. {
  768. struct wil6210_vif *vif = ndev_to_vif(ndev);
  769. struct wil6210_priv *wil = ndev_to_wil(ndev);
  770. struct wireless_dev *wdev = vif_to_wdev(vif);
  771. unsigned int len = skb->len;
  772. u8 *sa, *da = wil_skb_get_da(skb);
  773. /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
  774. * is not suitable, need to look at data
  775. */
  776. int mcast = is_multicast_ether_addr(da);
  777. struct sk_buff *xmit_skb = NULL;
  778. if (wdev->iftype == NL80211_IFTYPE_STATION) {
  779. sa = wil_skb_get_sa(skb);
  780. if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
  781. /* mcast packet looped back to us */
  782. dev_kfree_skb(skb);
  783. ndev->stats.rx_dropped++;
  784. stats->rx_dropped++;
  785. wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
  786. return;
  787. }
  788. } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate &&
  789. /* pass EAPOL packets to local net stack only */
  790. (wil_skb_get_protocol(skb) != htons(ETH_P_PAE))) {
  791. if (mcast) {
  792. /* send multicast frames both to higher layers in
  793. * local net stack and back to the wireless medium
  794. */
  795. xmit_skb = skb_copy(skb, GFP_ATOMIC);
  796. } else {
  797. int xmit_cid = wil_find_cid(wil, vif->mid, da);
  798. if (xmit_cid >= 0) {
  799. /* The destination station is associated to
  800. * this AP (in this VLAN), so send the frame
  801. * directly to it and do not pass it to local
  802. * net stack.
  803. */
  804. xmit_skb = skb;
  805. skb = NULL;
  806. }
  807. }
  808. }
  809. if (xmit_skb) {
  810. /* Send to wireless media and increase priority by 256 to
  811. * keep the received priority instead of reclassifying
  812. * the frame (see cfg80211_classify8021d).
  813. */
  814. xmit_skb->dev = ndev;
  815. xmit_skb->priority += 256;
  816. xmit_skb->protocol = htons(ETH_P_802_3);
  817. skb_reset_network_header(xmit_skb);
  818. skb_reset_mac_header(xmit_skb);
  819. wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
  820. dev_queue_xmit(xmit_skb);
  821. }
  822. if (skb) { /* deliver to local stack */
  823. skb->protocol = eth_type_trans(skb, ndev);
  824. skb->dev = ndev;
  825. if (skb->protocol == cpu_to_be16(ETH_P_PAE))
  826. wil_rx_handle_eapol(vif, skb);
  827. if (gro)
  828. napi_gro_receive(&wil->napi_rx, skb);
  829. else
  830. netif_rx(skb);
  831. }
  832. ndev->stats.rx_packets++;
  833. stats->rx_packets++;
  834. ndev->stats.rx_bytes += len;
  835. stats->rx_bytes += len;
  836. if (mcast)
  837. ndev->stats.multicast++;
  838. }
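/* Forwarding decisions made above, in short: in STA mode, multicast frames
 * we originated ourselves are dropped; in AP mode (unless ap_isolate is set,
 * and never for EAPOL), multicast is both delivered locally and copied back
 * to the wireless medium, while unicast destined to another associated
 * station is re-queued directly to that station via dev_queue_xmit() and is
 * not passed up the local stack.
 */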
  839. void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
  840. {
  841. int cid, security;
  842. struct wil6210_priv *wil = ndev_to_wil(ndev);
  843. struct wil6210_vif *vif = ndev_to_vif(ndev);
  844. struct wil_net_stats *stats;
  845. wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
  846. stats = &wil->sta[cid].stats;
  847. skb_orphan(skb);
  848. /* pass only EAPOL packets as plaintext */
  849. if (vif->privacy && !security &&
  850. wil_skb_get_protocol(skb) != htons(ETH_P_PAE)) {
  851. wil_dbg_txrx(wil,
  852. "Rx drop plaintext frame with %d bytes in secure network\n",
  853. skb->len);
  854. dev_kfree_skb(skb);
  855. ndev->stats.rx_dropped++;
  856. stats->rx_dropped++;
  857. return;
  858. }
  859. if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
  860. wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
  861. dev_kfree_skb(skb);
  862. ndev->stats.rx_dropped++;
  863. stats->rx_replay++;
  864. stats->rx_dropped++;
  865. return;
  866. }
  867. /* check errors reported by HW and update statistics */
  868. if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
  869. dev_kfree_skb(skb);
  870. return;
  871. }
  872. wil_netif_rx(skb, ndev, cid, stats, true);
  873. }
  874. /* Proceed all completed skb's from Rx VRING
  875. *
  876. * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
  877. */
  878. void wil_rx_handle(struct wil6210_priv *wil, int *quota)
  879. {
  880. struct net_device *ndev = wil->main_ndev;
  881. struct wireless_dev *wdev = ndev->ieee80211_ptr;
  882. struct wil_ring *v = &wil->ring_rx;
  883. struct sk_buff *skb;
  884. if (unlikely(!v->va)) {
  885. wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
  886. return;
  887. }
  888. wil_dbg_txrx(wil, "rx_handle\n");
  889. while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
  890. (*quota)--;
  891. /* monitor is currently supported on main interface only */
  892. if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
  893. skb->dev = ndev;
  894. skb_reset_mac_header(skb);
  895. skb->ip_summed = CHECKSUM_UNNECESSARY;
  896. skb->pkt_type = PACKET_OTHERHOST;
  897. skb->protocol = htons(ETH_P_802_2);
  898. wil_netif_rx_any(skb, ndev);
  899. } else {
  900. wil_rx_reorder(wil, skb);
  901. }
  902. }
  903. wil_rx_refill(wil, v->size);
  904. }
  905. static void wil_rx_buf_len_init(struct wil6210_priv *wil)
  906. {
  907. wil->rx_buf_len = rx_large_buf ?
  908. WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
  909. if (mtu_max > wil->rx_buf_len) {
  910. /* do not allow RX buffers to be smaller than mtu_max, for
  911. * backward compatibility (mtu_max parameter was also used
  912. * to support receiving large packets)
  913. */
  914. wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
  915. wil->rx_buf_len = mtu_max;
  916. }
  917. }
  918. static int wil_rx_init(struct wil6210_priv *wil, uint order)
  919. {
  920. struct wil_ring *vring = &wil->ring_rx;
  921. int rc;
  922. wil_dbg_misc(wil, "rx_init\n");
  923. if (vring->va) {
  924. wil_err(wil, "Rx ring already allocated\n");
  925. return -EINVAL;
  926. }
  927. wil_rx_buf_len_init(wil);
  928. vring->size = 1 << order;
  929. vring->is_rx = true;
  930. rc = wil_vring_alloc(wil, vring);
  931. if (rc)
  932. return rc;
  933. rc = wmi_rx_chain_add(wil, vring);
  934. if (rc)
  935. goto err_free;
  936. rc = wil_rx_refill(wil, vring->size);
  937. if (rc)
  938. goto err_free;
  939. return 0;
  940. err_free:
  941. wil_vring_free(wil, vring);
  942. return rc;
  943. }
  944. static void wil_rx_fini(struct wil6210_priv *wil)
  945. {
  946. struct wil_ring *vring = &wil->ring_rx;
  947. wil_dbg_misc(wil, "rx_fini\n");
  948. if (vring->va)
  949. wil_vring_free(wil, vring);
  950. }
  951. static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
  952. u32 len, int vring_index)
  953. {
  954. struct vring_tx_desc *d = &desc->legacy;
  955. wil_desc_addr_set(&d->dma.addr, pa);
  956. d->dma.ip_length = 0;
  957. /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
  958. d->dma.b11 = 0/*14 | BIT(7)*/;
  959. d->dma.error = 0;
  960. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  961. d->dma.length = cpu_to_le16((u16)len);
  962. d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
  963. d->mac.d[0] = 0;
  964. d->mac.d[1] = 0;
  965. d->mac.d[2] = 0;
  966. d->mac.ucode_cmd = 0;
  967. /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
  968. d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
  969. (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
  970. return 0;
  971. }
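/* Note: every data descriptor built here enables SNAP header insertion and
 * selects L2 translation type 1 (802.3), so the driver can queue plain
 * Ethernet frames and leave the conversion to 802.11 + SNAP to the device.
 */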
  972. void wil_tx_data_init(struct wil_ring_tx_data *txdata)
  973. {
  974. spin_lock_bh(&txdata->lock);
  975. txdata->dot1x_open = false;
  976. txdata->enabled = 0;
  977. txdata->idle = 0;
  978. txdata->last_idle = 0;
  979. txdata->begin = 0;
  980. txdata->agg_wsize = 0;
  981. txdata->agg_timeout = 0;
  982. txdata->agg_amsdu = 0;
  983. txdata->addba_in_progress = false;
  984. txdata->mid = U8_MAX;
  985. spin_unlock_bh(&txdata->lock);
  986. }
  987. static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
  988. int cid, int tid)
  989. {
  990. struct wil6210_priv *wil = vif_to_wil(vif);
  991. int rc;
  992. struct wmi_vring_cfg_cmd cmd = {
  993. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  994. .vring_cfg = {
  995. .tx_sw_ring = {
  996. .max_mpdu_size =
  997. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  998. .ring_size = cpu_to_le16(size),
  999. },
  1000. .ringid = id,
  1001. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  1002. .mac_ctrl = 0,
  1003. .to_resolution = 0,
  1004. .agg_max_wsize = 0,
  1005. .schd_params = {
  1006. .priority = cpu_to_le16(0),
  1007. .timeslot_us = cpu_to_le16(0xfff),
  1008. },
  1009. },
  1010. };
  1011. struct {
  1012. struct wmi_cmd_hdr wmi;
  1013. struct wmi_vring_cfg_done_event cmd;
  1014. } __packed reply = {
  1015. .cmd = {.status = WMI_FW_STATUS_FAILURE},
  1016. };
  1017. struct wil_ring *vring = &wil->ring_tx[id];
  1018. struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
  1019. if (cid >= WIL6210_RX_DESC_MAX_CID) {
  1020. cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
  1021. cmd.vring_cfg.cid = cid;
  1022. cmd.vring_cfg.tid = tid;
  1023. } else {
  1024. cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
  1025. }
  1026. wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
  1027. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  1028. lockdep_assert_held(&wil->mutex);
  1029. if (vring->va) {
  1030. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  1031. rc = -EINVAL;
  1032. goto out;
  1033. }
  1034. wil_tx_data_init(txdata);
  1035. vring->is_rx = false;
  1036. vring->size = size;
  1037. rc = wil_vring_alloc(wil, vring);
  1038. if (rc)
  1039. goto out;
  1040. wil->ring2cid_tid[id][0] = cid;
  1041. wil->ring2cid_tid[id][1] = tid;
  1042. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  1043. if (!vif->privacy)
  1044. txdata->dot1x_open = true;
  1045. rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
  1046. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
  1047. WIL_WMI_CALL_GENERAL_TO_MS);
  1048. if (rc)
  1049. goto out_free;
  1050. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  1051. wil_err(wil, "Tx config failed, status 0x%02x\n",
  1052. reply.cmd.status);
  1053. rc = -EINVAL;
  1054. goto out_free;
  1055. }
  1056. spin_lock_bh(&txdata->lock);
  1057. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  1058. txdata->mid = vif->mid;
  1059. txdata->enabled = 1;
  1060. spin_unlock_bh(&txdata->lock);
  1061. if (txdata->dot1x_open && (agg_wsize >= 0))
  1062. wil_addba_tx_request(wil, id, agg_wsize);
  1063. return 0;
  1064. out_free:
  1065. spin_lock_bh(&txdata->lock);
  1066. txdata->dot1x_open = false;
  1067. txdata->enabled = 0;
  1068. spin_unlock_bh(&txdata->lock);
  1069. wil_vring_free(wil, vring);
  1070. wil->ring2cid_tid[id][0] = wil->max_assoc_sta;
  1071. wil->ring2cid_tid[id][1] = 0;
  1072. out:
  1073. return rc;
  1074. }
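/* The Tx ring bring-up above follows a fixed sequence: allocate the ring in
 * host memory, record the cid/tid mapping, send WMI_VRING_CFG_CMDID and
 * wait for WMI_VRING_CFG_DONE_EVENTID, then (on success) latch the HW tail
 * pointer from the reply and mark the ring enabled. Only then, and only for
 * 802.1x-open rings with agg_wsize >= 0, is a block-ack session requested
 * via wil_addba_tx_request().
 */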
  1075. static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
  1076. int tid)
  1077. {
  1078. struct wil6210_priv *wil = vif_to_wil(vif);
  1079. int rc;
  1080. struct wmi_vring_cfg_cmd cmd = {
  1081. .action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
  1082. .vring_cfg = {
  1083. .tx_sw_ring = {
  1084. .max_mpdu_size =
  1085. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  1086. .ring_size = 0,
  1087. },
  1088. .ringid = ring_id,
  1089. .cidxtid = mk_cidxtid(cid, tid),
  1090. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  1091. .mac_ctrl = 0,
  1092. .to_resolution = 0,
  1093. .agg_max_wsize = 0,
  1094. .schd_params = {
  1095. .priority = cpu_to_le16(0),
  1096. .timeslot_us = cpu_to_le16(0xfff),
  1097. },
  1098. },
  1099. };
  1100. struct {
  1101. struct wmi_cmd_hdr wmi;
  1102. struct wmi_vring_cfg_done_event cmd;
  1103. } __packed reply = {
  1104. .cmd = {.status = WMI_FW_STATUS_FAILURE},
  1105. };
  1106. struct wil_ring *vring = &wil->ring_tx[ring_id];
  1107. struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
  1108. wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
  1109. cid, tid);
  1110. lockdep_assert_held(&wil->mutex);
  1111. if (!vring->va) {
  1112. wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
  1113. return -EINVAL;
  1114. }
  1115. if (wil->ring2cid_tid[ring_id][0] != cid ||
  1116. wil->ring2cid_tid[ring_id][1] != tid) {
  1117. wil_err(wil, "ring info does not match cid=%u tid=%u\n",
  1118. wil->ring2cid_tid[ring_id][0],
  1119. wil->ring2cid_tid[ring_id][1]);
  1120. }
  1121. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  1122. rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
  1123. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
  1124. WIL_WMI_CALL_GENERAL_TO_MS);
  1125. if (rc)
  1126. goto fail;
  1127. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  1128. wil_err(wil, "Tx modify failed, status 0x%02x\n",
  1129. reply.cmd.status);
  1130. rc = -EINVAL;
  1131. goto fail;
  1132. }
  1133. /* set BA aggregation window size to 0 to force a new BA with the
  1134. * new AP
  1135. */
  1136. txdata->agg_wsize = 0;
  1137. if (txdata->dot1x_open && agg_wsize >= 0)
  1138. wil_addba_tx_request(wil, ring_id, agg_wsize);
  1139. return 0;
  1140. fail:
  1141. spin_lock_bh(&txdata->lock);
  1142. txdata->dot1x_open = false;
  1143. txdata->enabled = 0;
  1144. spin_unlock_bh(&txdata->lock);
  1145. wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
  1146. wil->ring2cid_tid[ring_id][1] = 0;
  1147. return rc;
  1148. }
  1149. int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
  1150. {
  1151. struct wil6210_priv *wil = vif_to_wil(vif);
  1152. int rc;
  1153. struct wmi_bcast_vring_cfg_cmd cmd = {
  1154. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  1155. .vring_cfg = {
  1156. .tx_sw_ring = {
  1157. .max_mpdu_size =
  1158. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  1159. .ring_size = cpu_to_le16(size),
  1160. },
  1161. .ringid = id,
  1162. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  1163. },
  1164. };
  1165. struct {
  1166. struct wmi_cmd_hdr wmi;
  1167. struct wmi_vring_cfg_done_event cmd;
  1168. } __packed reply = {
  1169. .cmd = {.status = WMI_FW_STATUS_FAILURE},
  1170. };
  1171. struct wil_ring *vring = &wil->ring_tx[id];
  1172. struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
  1173. wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
  1174. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  1175. lockdep_assert_held(&wil->mutex);
  1176. if (vring->va) {
  1177. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  1178. rc = -EINVAL;
  1179. goto out;
  1180. }
  1181. wil_tx_data_init(txdata);
  1182. vring->is_rx = false;
  1183. vring->size = size;
  1184. rc = wil_vring_alloc(wil, vring);
  1185. if (rc)
  1186. goto out;
  1187. wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */
  1188. wil->ring2cid_tid[id][1] = 0; /* TID */
  1189. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  1190. if (!vif->privacy)
  1191. txdata->dot1x_open = true;
  1192. rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
  1193. &cmd, sizeof(cmd),
  1194. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
  1195. WIL_WMI_CALL_GENERAL_TO_MS);
  1196. if (rc)
  1197. goto out_free;
  1198. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  1199. wil_err(wil, "Tx config failed, status 0x%02x\n",
  1200. reply.cmd.status);
  1201. rc = -EINVAL;
  1202. goto out_free;
  1203. }
  1204. spin_lock_bh(&txdata->lock);
  1205. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  1206. txdata->mid = vif->mid;
  1207. txdata->enabled = 1;
  1208. spin_unlock_bh(&txdata->lock);
  1209. return 0;
  1210. out_free:
  1211. spin_lock_bh(&txdata->lock);
  1212. txdata->enabled = 0;
  1213. txdata->dot1x_open = false;
  1214. spin_unlock_bh(&txdata->lock);
  1215. wil_vring_free(wil, vring);
  1216. out:
  1217. return rc;
  1218. }
  1219. static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
  1220. struct wil6210_vif *vif,
  1221. struct sk_buff *skb)
  1222. {
  1223. int i, cid;
  1224. const u8 *da = wil_skb_get_da(skb);
  1225. int min_ring_id = wil_get_min_tx_ring_id(wil);
  1226. cid = wil_find_cid(wil, vif->mid, da);
  1227. if (cid < 0 || cid >= wil->max_assoc_sta)
  1228. return NULL;
  1229. /* TODO: fix for multiple TID */
  1230. for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
  1231. if (!wil->ring_tx_data[i].dot1x_open &&
  1232. skb->protocol != cpu_to_be16(ETH_P_PAE))
  1233. continue;
  1234. if (wil->ring2cid_tid[i][0] == cid) {
  1235. struct wil_ring *v = &wil->ring_tx[i];
  1236. struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
  1237. wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
  1238. da, i);
  1239. if (v->va && txdata->enabled) {
  1240. return v;
  1241. } else {
  1242. wil_dbg_txrx(wil,
  1243. "find_tx_ucast: vring[%d] not valid\n",
  1244. i);
  1245. return NULL;
  1246. }
  1247. }
  1248. }
  1249. return NULL;
  1250. }
  1251. static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
  1252. struct wil_ring *ring, struct sk_buff *skb);
  1253. static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
  1254. struct wil6210_vif *vif,
  1255. struct sk_buff *skb)
  1256. {
  1257. struct wil_ring *ring;
  1258. int i;
  1259. u8 cid;
  1260. struct wil_ring_tx_data *txdata;
  1261. int min_ring_id = wil_get_min_tx_ring_id(wil);
  1262. /* In the STA mode, it is expected to have only 1 VRING
  1263. * for the AP we connected to.
  1264. * find 1-st vring eligible for this skb and use it.
  1265. */
  1266. for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
  1267. ring = &wil->ring_tx[i];
  1268. txdata = &wil->ring_tx_data[i];
  1269. if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
  1270. continue;
  1271. cid = wil->ring2cid_tid[i][0];
  1272. if (cid >= wil->max_assoc_sta) /* skip BCAST */
  1273. continue;
  1274. if (!wil->ring_tx_data[i].dot1x_open &&
  1275. skb->protocol != cpu_to_be16(ETH_P_PAE))
  1276. continue;
  1277. wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
  1278. return ring;
  1279. }
  1280. wil_dbg_txrx(wil, "Tx while no rings active?\n");
  1281. return NULL;
  1282. }
  1283. /* Use one of 2 strategies:
  1284. *
  1285. * 1. New (real broadcast):
  1286. * use dedicated broadcast vring
  1287. * 2. Old (pseudo-DMS):
  1288. * Find 1-st vring and return it;
  1289. * duplicate skb and send it to other active vrings;
  1290. * in all cases override dest address to unicast peer's address
  1291. * Use old strategy when new is not supported yet:
  1292. * - for PBSS
  1293. */
  1294. static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
  1295. struct wil6210_vif *vif,
  1296. struct sk_buff *skb)
  1297. {
  1298. struct wil_ring *v;
  1299. struct wil_ring_tx_data *txdata;
  1300. int i = vif->bcast_ring;
  1301. if (i < 0)
  1302. return NULL;
  1303. v = &wil->ring_tx[i];
  1304. txdata = &wil->ring_tx_data[i];
  1305. if (!v->va || !txdata->enabled)
  1306. return NULL;
  1307. if (!wil->ring_tx_data[i].dot1x_open &&
  1308. skb->protocol != cpu_to_be16(ETH_P_PAE))
  1309. return NULL;
  1310. return v;
  1311. }

/* apply multicast to unicast only for ARP and IP packets
 * (see NL80211_CMD_SET_MULTICAST_TO_UNICAST for more info)
 */
static bool wil_check_multicast_to_unicast(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	const struct ethhdr *eth = (void *)skb->data;
	const struct vlan_ethhdr *ethvlan = (void *)skb->data;
	__be16 ethertype;

	if (!wil->multicast_to_unicast)
		return false;

	/* multicast to unicast conversion only for some payload */
	ethertype = eth->h_proto;
	if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
		ethertype = ethvlan->h_vlan_encapsulated_proto;
	switch (ethertype) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		break;
	default:
		return false;
	}

	return true;
}
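
/* Replace the (multicast) destination address in @skb with the unicast MAC
 * address of the station that @vring_index is mapped to.
 */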
static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	u8 *da = wil_skb_get_da(skb);
	int cid = wil->ring2cid_tid[vring_index][0];

	ether_addr_copy(da, wil->sta[cid].addr);
}

static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					    struct wil6210_vif *vif,
					    struct sk_buff *skb)
{
	struct wil_ring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	const u8 *src = wil_skb_get_sa(skb);
	struct wil_ring_tx_data *txdata, *txdata2;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	/* find the first vring eligible for data */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->ring_tx[i];
		txdata = &wil->ring_tx_data[i];
		if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
			continue;

		cid = wil->ring2cid_tid[i][0];
		if (cid >= wil->max_assoc_sta) /* skip BCAST */
			continue;
		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (memcmp(wil->sta[cid].addr, src, ETH_ALEN) == 0)
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate the skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->ring_tx[i];
		txdata2 = &wil->ring_tx_data[i];
		if (!v2->va || txdata2->mid != vif->mid)
			continue;
		cid = wil->ring2cid_tid[i][0];
		if (cid >= wil->max_assoc_sta) /* skip BCAST */
			continue;
		if (!wil->ring_tx_data[i].dot1x_open &&
		    skb->protocol != cpu_to_be16(ETH_P_PAE))
			continue;

		if (memcmp(wil->sta[cid].addr, src, ETH_ALEN) == 0)
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_ring(wil, vif, v2, skb2);
			/* successful call to wil_tx_ring takes skb2 ref */
			dev_kfree_skb_any(skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}
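
/* Store the number of descriptors that make up this frame in the MAC portion
 * of the Tx descriptor (NUM_OF_DESCRIPTORS field).
 */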
static inline
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}

/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		     (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}

/* Sets the descriptor @d up for csum offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns 0 on success, or -EINVAL if the L3/L4 protocol is not one the
 * hardware can offload (IPv4/IPv6 carrying TCP or UDP).
 *
 * It is very similar to wil_tx_desc_offload_setup_tso() above; the "if"
 * is unrolled to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
			(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
			(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
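
/* Mark @d as the last descriptor of a frame: end-of-packet, write-back of the
 * DU status and a DMA completion interrupt are requested.
 */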
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}

static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
	d->dma.d0 |= wil_tso_type_lst <<
		     DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}
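
/* TSO Tx path (summary of the flow below): the protocol headers are mapped
 * into a dedicated header descriptor, then the skb head and page fragments
 * are sliced into per-MSS segments, each closed with a "last" descriptor.
 * Finally the header descriptor is updated with the total number of
 * descriptors used and the hardware tail pointer is advanced.
 */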
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
			      struct wil_ring *vring, struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	/* point to descriptors in shared memory */
	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
				      *_first_desc = NULL;
	/* pointers to shadow descriptors */
	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
			     *first_desc = &first_desc_mem;
	/* pointer to shadow descriptors' context */
	struct wil_ctx *hdr_ctx, *first_ctx = NULL;
	int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss */
	u32 swhead = vring->swhead;
	int used, avail = wil_ring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 1;
	int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
	int f, len, hdrlen, headlen;
	int vring_index = vring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	const skb_frag_t *frag = NULL;
	int rem_data = mss;
	int lenmss;
	bool hdr_compensation_need = true;
	int desc_tso_type = wil_tso_type_first;
	bool is_ipv4;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int gso_type;
	int rc = -EINVAL;

	wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* A typical 4K page holds 3-4 payloads; we assume each fragment
	 * is a full payload, that's how min_desc_required has been
	 * calculated. In reality we might need more or fewer descriptors,
	 * this is only the initial check.
	 */
	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, min_desc_required);
		return -ENOMEM;
	}

	/* Header Length = MAC header len + IP header len + TCP header len */
	hdrlen = skb_tcp_all_headers(skb);

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		/* TCP v4, zero out the IP length and IPv4 checksum fields
		 * as required by the offloading doc
		 */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		/* TCP v6, zero out the payload length */
		ipv6_hdr(skb)->payload_len = 0;
		is_ipv4 = false;
		break;
	default:
		/* other than TCPv4 or TCPv6 types are not supported for TSO.
		 * It is also illegal for both to be set simultaneously
		 */
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * of the packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	_hdr_desc = &vring->va[i].tx.legacy;

	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb head DMA map error\n");
		goto err_exit;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
				  hdrlen, vring_index);
	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
				      tcp_hdr_len, skb_net_hdr_len);
	wil_tx_last_desc(hdr_desc);
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	hdr_ctx = &vring->ctx[i];

	descs_used++;
	headlen = skb_headlen(skb) - hdrlen;

	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
		if (headlen) {
			len = headlen;
			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
				     len);
		} else {
			frag = &skb_shinfo(skb)->frags[f];
			len = skb_frag_size(frag);
			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
		}

		while (len) {
			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d\n",
				     len, rem_data, descs_used);

			if (descs_used == avail) {
				wil_err_ratelimited(wil, "TSO: ring overflow\n");
				rc = -ENOMEM;
				goto mem_error;
			}

			lenmss = min_t(int, rem_data, len);
			i = (swhead + descs_used) % vring->size;
			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

			if (!headlen) {
				pa = skb_frag_dma_map(dev, frag,
						      skb_frag_size(frag) - len,
						      lenmss, DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_page;
			} else {
				pa = dma_map_single(dev,
						    skb->data +
						    skb_headlen(skb) - headlen,
						    lenmss,
						    DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_single;
				headlen -= lenmss;
			}

			if (unlikely(dma_mapping_error(dev, pa))) {
				wil_err(wil, "TSO: DMA map page error\n");
				goto mem_error;
			}

			_desc = &vring->va[i].tx.legacy;

			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
						  pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++;  /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */

			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* Close the segment if we reached the mss size or this
			 * is the last frag
			 */
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment includes the hdr desc
					 * for release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt + 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/* last descriptor will be copied at the end
				 * of this TSO processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptor */
				*_desc = *d;
		}
	}

	if (!_desc)
		goto mem_error;

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr) */
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_ring_used_tx(vring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_ring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;

mem_error:
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
		_desc = &vring->va[i].tx.legacy;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}
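
/* Non-TSO Tx path: map the skb head and each page fragment into consecutive
 * descriptors, set the end-of-packet bits on the last one, take an skb
 * reference for the completion path and hand the frame to the hardware by
 * advancing the ring head.
 */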
static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
			 struct wil_ring *ring, struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = ring->swhead;
	int avail = wil_ring_avail_tx(ring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (ring_index == vif->bcast_ring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
		     skb->len, ring_index, nr_frags);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &ring->va[i].tx.legacy;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	ring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
				  ring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			ring_index);
		goto dma_error;
	}

	ring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % ring->size;
		_d = &ring->va[i].tx.legacy;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				ring_index);
			goto dma_error;
		}
		ring->ctx[i].mapped_as = wil_mapped_as_page;
		wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
					  pa, len, ring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	ring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_ring_advance_head(ring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
		     ring->swhead);
	trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;

dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % ring->size;
		ctx = &ring->ctx[i];
		_d = &ring->va[i].tx.legacy;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil->txrx_ops.tx_desc_unmap(dev,
					    (union wil_tx_desc *)d,
					    ctx);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}
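
/* Serializing wrapper around the Tx path: takes the per-ring data lock, drops
 * the frame while suspend/resume is in progress, and dispatches to the TSO or
 * regular descriptor-filling routine.
 */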
static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
		       struct wil_ring *ring, struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int rc;

	spin_lock(&txdata->lock);

	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status) ||
	    test_bit(wil_status_resuming, wil->status)) {
		wil_dbg_txrx(wil,
			     "suspend/resume in progress. drop packet\n");
		spin_unlock(&txdata->lock);
		return -EINVAL;
	}

	rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
	     (wil, vif, ring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}

/* Check the status of tx vrings and stop/wake net queues if needed.
 * It will start/stop the net queues of a specific VIF net_device.
 *
 * This function does one of two checks:
 * If check_stop is true, it checks whether the net queues need to be stopped.
 * If the conditions for stopping are met, netif_tx_stop_all_queues() is called.
 * If check_stop is false, it checks whether the net queues need to be woken.
 * If the conditions for waking are met, netif_tx_wake_all_queues() is called.
 * vring is the vring which is currently being modified, either by adding
 * descriptors (tx) to it or removing descriptors (tx complete) from it. It can
 * be NULL when irrelevant (e.g. connect/disconnect events).
 *
 * The implementation is to stop the net queues if the modified vring has low
 * descriptor availability, and to wake them if no vring has low descriptor
 * availability and the modified vring has high descriptor availability.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct wil6210_vif *vif,
					   struct wil_ring *ring,
					   bool check_stop)
{
	int i;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	if (unlikely(!vif))
		return;

	if (ring)
		wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
			     (int)(ring - wil->ring_tx), vif->mid, check_stop,
			     vif->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
			     check_stop, vif->mid, vif->net_queue_stopped);

	if (ring && drop_if_ring_full)
		/* no need to stop/wake net queues */
		return;

	if (check_stop == vif->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!ring || unlikely(wil_ring_avail_low(ring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(vif_to_ndev(vif));
			vif->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* Do not wake the queues in suspend flow */
	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status))
		return;

	/* check wake */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		struct wil_ring *cur_ring = &wil->ring_tx[i];
		struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];

		if (txdata->mid != vif->mid || !cur_ring->va ||
		    !txdata->enabled || cur_ring == ring)
			continue;

		if (wil_ring_avail_low(cur_ring)) {
			wil_dbg_txrx(wil, "ring %d full, can't wake\n",
				     (int)(cur_ring - wil->ring_tx));
			return;
		}
	}

	if (!ring || wil_ring_avail_high(ring)) {
		/* enough room in the ring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(vif_to_ndev(vif));
		vif->net_queue_stopped = false;
	}
}

void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
			   struct wil_ring *ring, bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vif, ring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
			      struct wil_ring *ring, bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vif, ring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}
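
/* Network stack transmit entry point: pick a Tx ring according to the
 * interface type and destination address (unicast, dedicated broadcast ring,
 * or per-station duplication), then hand the frame to wil_tx_ring().
 */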
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_vif *vif = ndev_to_vif(ndev);
	struct wil6210_priv *wil = vif_to_wil(vif);
	const u8 *da = wil_skb_get_da(skb);
	bool bcast = is_multicast_ether_addr(da);
	struct wil_ring *ring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "start_xmit\n");
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
		wil_dbg_ratelimited(wil,
				    "VIF not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
		/* in STA mode (ESS), all frames go to the same VRING (to AP) */
		ring = wil_find_tx_ring_sta(wil, vif, skb);
	} else if (bcast) {
		if (vif->pbss || wil_check_multicast_to_unicast(wil, skb))
			/* in pbss, no bcast VRING - duplicate skb in
			 * all stations VRINGs
			 */
			ring = wil_find_tx_bcast_2(wil, vif, skb);
		else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
			/* AP has a dedicated bcast VRING */
			ring = wil_find_tx_bcast_1(wil, vif, skb);
		else
			/* unexpected combination, fall back to duplicating
			 * the skb in all stations VRINGs
			 */
			ring = wil_find_tx_bcast_2(wil, vif, skb);
	} else {
		/* unicast, find specific VRING by dest. address */
		ring = wil_find_tx_ucast(wil, vif, skb);
	}
	if (unlikely(!ring)) {
		wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_ring(wil, vif, ring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vif, ring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		if (drop_if_ring_full)
			goto drop;
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
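
/* Account Tx latency for a completed skb: the transmit timestamp was stored
 * in skb->cb on the Tx path; here the delta is added to the per-station
 * histogram and to the min/max/total statistics.
 */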
void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
			 struct wil_sta_info *sta)
{
	int skb_time_us;
	int bin;

	if (!wil->tx_latency)
		return;

	if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
		return;

	skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
	bin = skb_time_us / wil->tx_latency_res;
	bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);

	wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
	sta->tx_latency_bins[bin]++;
	sta->stats.tx_latency_total_us += skb_time_us;
	if (skb_time_us < sta->stats.tx_latency_min_us)
		sta->stats.tx_latency_min_us = skb_time_us;
	if (skb_time_us > sta->stats.tx_latency_max_us)
		sta->stats.tx_latency_max_us = skb_time_us;
}

/* Clean up transmitted skbs from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_vif *vif, int ringid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct net_device *ndev = vif_to_ndev(vif);
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *vring = &wil->ring_tx[ringid];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
	int done = 0;
	int cid = wil->ring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

	used_before_complete = wil_ring_used_tx(vring);

	if (cid < wil->max_assoc_sta)
		stats = &wil->sta[cid].stats;

	while (!wil_ring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/* For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;

		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx.legacy;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx.legacy;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil->txrx_ops.tx_desc_unmap(dev,
						    (union wil_tx_desc *)d,
						    ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
							&wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}

				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
					wil_tx_complete_handle_eapol(vif, skb);

				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();
			/* There is no need to touch the HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_ring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_ring_used_tx(vring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vif, vring, false);

	return done;
}

static inline int wil_tx_init(struct wil6210_priv *wil)
{
	return 0;
}

static inline void wil_tx_fini(struct wil6210_priv *wil) {}
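
/* Extract the parameters the Rx reorder logic needs (TID, CID, MID, sequence
 * number, mcast and retry flags) from the legacy Rx descriptor of @skb.
 */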
static void wil_get_reorder_params(struct wil6210_priv *wil,
				   struct sk_buff *skb, int *tid, int *cid,
				   int *mid, u16 *seq, int *mcast, int *retry)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);

	*tid = wil_rxdesc_tid(d);
	*cid = wil_skb_get_cid(skb);
	*mid = wil_rxdesc_mid(d);
	*seq = wil_rxdesc_seq(d);
	*mcast = wil_rxdesc_mcast(d);
	*retry = wil_rxdesc_retry(d);
}
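
/* Populate wil->txrx_ops with the legacy DMA implementations of the Tx and Rx
 * operations.
 */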
void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation;
	/* TX ops */
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
	wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
	wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
	wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
	wil->txrx_ops.ring_fini_tx = wil_vring_free;
	wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
	wil->txrx_ops.tx_init = wil_tx_init;
	wil->txrx_ops.tx_fini = wil_tx_fini;
	wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
	wil->txrx_ops.rx_error_check = wil_rx_error_check;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
	wil->txrx_ops.rx_fini = wil_rx_fini;
}