usb.c

// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <[email protected]>
 */

#include <linux/module.h>

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

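/* Issue a vendor control request on endpoint 0, retrying transient errors
 * up to MT_VEND_REQ_MAX_RETRY times. Must be called with usb_ctrl_mtx held.
 */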
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(__mt76u_vendor_request);

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
				     addr, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
EXPORT_SYMBOL_GPL(___mt76u_rr);

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
			   addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
			       addr, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
EXPORT_SYMBOL_GPL(___mt76u_wr);

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}

	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
		    addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure that a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_read_copy);

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

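/* Scatter-gather is usable only when the host controller supports SG
 * (sg_tablesize > 0) and either has no SG size constraint or the device
 * runs at USB wireless speed; the disable_usb_sg module parameter
 * overrides the check.
 */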
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

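/* Fill up to nsgs scatterlist entries of the rx urb with page fragments
 * allocated from the queue's page_frag cache. Returns the number of
 * entries filled, or -ENOMEM if no fragment could be allocated.
 */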
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs, gfp_t gfp)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

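/* Parse the DMA length from the start of an rx buffer and sanity-check it
 * against the received data length; returns the frame length or -EINVAL
 * if the header looks corrupted.
 */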
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

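/* Build an skb around the received buffer. If the payload plus
 * skb_shared_info does not fit in the buffer, copy the first
 * MT_SKB_HEAD_LEN bytes into a freshly allocated skb head and attach the
 * remainder as a page fragment; otherwise wrap the buffer directly with
 * build_skb().
 */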
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

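/* Turn a completed rx urb into an skb, chaining any additional
 * scatter-gather segments as page fragments, and hand it to the driver.
 * Returns the number of segments consumed (to be refilled), or 0 if the
 * entry was dropped.
 */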
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);

	if (len == data_len &&
	    dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
		return 0;

	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}

	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	mt76_worker_schedule(&dev->usb.rx_worker);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN) {
		local_bh_disable();
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
		local_bh_enable();
	}
}

static void mt76u_rx_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++) {
		if (!q->entry[i].urb)
			continue;

		mt76u_urb_free(q->entry[i].urb);
		q->entry[i].urb = NULL;
	}

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	mt76_worker_enable(&dev->usb.rx_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

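/* Reap completed tx urbs for each AC queue, wake waiters once a queue
 * drains, and kick the tx worker; schedule stat_work when the driver
 * provides tx_status_data.
 */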
static void mt76u_status_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i;

	if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		return;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}

		if (!q->queued)
			wake_up(&dev->tx_wait);

		mt76_worker_schedule(&dev->tx_worker);
	}

	if (dev->drv->tx_status_data &&
	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
		queue_work(dev->wq, &dev->usb.stat_work);
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->usb.status_worker);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

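/* Prepare a tx skb, map it onto the urb of the next free queue slot and
 * advance the head pointer; the urb is actually submitted later from
 * mt76u_tx_kick(). Returns the slot index or a negative error code.
 */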
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->entry[idx].wcid = 0xffff;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
	if (mt76_chip(dev) == 0x7663) {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
			return 1; /* BE */

		return lmac_queue_map[ac];
	}

	return mt76_ac_to_hwq(ac);
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		if (i >= IEEE80211_NUM_ACS) {
			dev->phy.q_tx[i] = dev->phy.q_tx[0];
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76u_ac_to_hwq(dev, i);

		dev->phy.q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.status_worker);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++) {
			usb_free_urb(q->entry[j].urb);
			q->entry[j].urb = NULL;
		}
	}
}

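/* Wait for pending tx to drain; on timeout kill the outstanding urbs and
 * complete any queued skbs by hand before re-enabling the workers.
 */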
void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	mt76_worker_disable(&dev->usb.status_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might still queue skbs, but
		 * mt76u_tx_kick() will fail to submit the urb, so clean up
		 * those skbs manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->usb.status_worker);

	mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

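/* Common USB setup: allocate the control-transfer bounce buffer, detect
 * scatter-gather support, resolve the bulk endpoints and start the
 * rx/status workers. Callers pass their own bus ops; mt76u_init() below
 * wires up the default register access ops.
 */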
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err;

	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		return -ENOMEM;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
				"usb-rx");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->status_worker,
				mt76u_status_worker, "usb-status");
	if (err)
		return err;

	sched_set_fifo_low(usb->rx_worker.task);
	sched_set_fifo_low(usb->status_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(__mt76u_init);

int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
{
	static struct mt76_bus_ops bus_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.read_copy = mt76u_read_copy,
		.write_copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};

	return __mt76u_init(dev, intf, &bus_ops);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");