xhci-sec.c

// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI secondary ring APIs
 *
 * Copyright (c) 2019,2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2008 Intel Corp.
 * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "xhci.h"
struct xhci_sec {
	struct xhci_ring	*event_ring;
	struct xhci_erst	erst;
	/* secondary interrupter */
	struct xhci_intr_reg __iomem *ir_set;
	struct xhci_hcd		*xhci;
	int			intr_num;
	struct list_head	list;
};

static LIST_HEAD(xhci_sec);

/* simplified redefinition from XHCI */
#define hcd_to_xhci(h) \
	((struct xhci_hcd *)(((h)->primary_hcd ?: (h))->hcd_priv))
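
/*
 * Allocate an event ring plus its ERST for one secondary interrupter and
 * program that interrupter's register set: ERST size (ERSTSZ), ERST base
 * address (ERSTBA) and the event ring dequeue pointer (ERDP).
 */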
static int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
		struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
		unsigned int intr_num, gfp_t flags)
{
	dma_addr_t deq;
	u64 val_64;
	unsigned int val;
	int ret;

	*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
	if (!*er)
		return -ENOMEM;

	ret = xhci_alloc_erst(xhci, *er, erst, flags);
	if (ret)
		return ret;

	xhci_dbg(xhci, "intr# %d: num segs = %i, virt addr = %pK, dma addr = 0x%llx",
			intr_num,
			erst->num_entries,
			erst->entries,
			(unsigned long long)erst->erst_dma_addr);

	/* set ERST count with the number of entries in the segment table */
	val = readl_relaxed(&ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "Write ERST size = %i to ir_set %d (some bits preserved)",
			val, intr_num);
	writel_relaxed(val, &ir_set->erst_size);

	xhci_dbg(xhci, "intr# %d: Set ERST entries to point to event ring.",
			intr_num);
	/* set the segment table base address */
	xhci_dbg(xhci, "Set ERST base address for ir_set %d = 0x%llx",
			intr_num,
			(unsigned long long)erst->erst_dma_addr);
	val_64 = xhci_read_64(xhci, &ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &ir_set->erst_base);

	/* Set the event ring dequeue address */
	deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci,
			"intr# %d: WARN something wrong with SW event ring deq ptr.\n",
			intr_num);
	/* Update HC event ring dequeue pointer */
	val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
	val_64 &= ERST_PTR_MASK;
	/*
	 * Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	val_64 &= ~ERST_EHB;
	xhci_dbg(xhci, "intr# %d: Write event ring dequeue pointer, preserving EHB bit",
			intr_num);
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
			&ir_set->erst_dequeue);
	xhci_dbg(xhci, "Wrote ERST address to ir_set %d.", intr_num);

	return 0;
}
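
/**
 * xhci_sec_event_ring_setup - set up an event ring on a secondary interrupter
 * @udev: USB device whose host controller owns the secondary interrupter
 * @intr_num: index into the xHC's interrupter register sets
 *
 * If a ring has already been set up for this xHC/interrupter pair it is
 * reused, otherwise a new one is allocated and the interrupter is programmed.
 *
 * Return: the event ring on success, an ERR_PTR() value on failure.
 */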
struct xhci_ring *xhci_sec_event_ring_setup(struct usb_device *udev,
		unsigned int intr_num)
{
	int ret;
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_sec *sec;

	if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd))
		return ERR_PTR(-ENODEV);

	if (!xhci->max_interrupters)
		xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);

	if ((xhci->xhc_state & XHCI_STATE_HALTED) ||
			intr_num >= xhci->max_interrupters) {
		xhci_err(xhci, "%s: state %x intr# %d\n", __func__,
			xhci->xhc_state, intr_num);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(sec, &xhci_sec, list) {
		if (sec->xhci == xhci && sec->intr_num == intr_num)
			goto done;
	}

	sec = kzalloc(sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return ERR_PTR(-ENOMEM);

	sec->intr_num = intr_num;
	sec->xhci = xhci;
	sec->ir_set = &xhci->run_regs->ir_set[intr_num];
	ret = xhci_event_ring_setup(xhci, &sec->event_ring, sec->ir_set,
			&sec->erst, intr_num, GFP_KERNEL);
	if (ret) {
		xhci_err(xhci, "sec event ring setup failed intr# %d\n",
			intr_num);
		kfree(sec);
		return ERR_PTR(ret);
	}
	list_add_tail(&sec->list, &xhci_sec);
done:
	return sec->event_ring;
}
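
/*
 * Quiesce a secondary interrupter before its ring is freed: mask the
 * interrupter (IMAN.IE), ack any pending interrupt (IMAN.IP), walk the ring
 * from the last acknowledged TRB up to the first TRB whose cycle bit no
 * longer matches the ring's cycle state, then write the final dequeue
 * pointer back with EHB set to clear the event handler busy flag.
 */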
static void xhci_handle_sec_intr_events(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_intr_reg __iomem *ir_set)
{
	union xhci_trb *erdp_trb, *current_trb;
	struct xhci_segment *seg;
	u64 erdp_reg;
	u32 iman_reg;
	dma_addr_t deq;
	unsigned long segment_offset;

	/* disable irq, ack pending interrupt and ack all pending events */
	iman_reg = readl_relaxed(&ir_set->irq_pending);
	iman_reg &= ~IMAN_IE;
	writel_relaxed(iman_reg, &ir_set->irq_pending);

	iman_reg = readl_relaxed(&ir_set->irq_pending);
	if (iman_reg & IMAN_IP)
		writel_relaxed(iman_reg, &ir_set->irq_pending);

	/* last acked event trb is in erdp reg */
	erdp_reg = xhci_read_64(xhci, &ir_set->erst_dequeue);
	deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
	if (!deq) {
		pr_debug("%s: event ring handling not required\n", __func__);
		return;
	}

	seg = ring->first_seg;
	segment_offset = deq - seg->dma;

	/* find out virtual address of the last acked event trb */
	erdp_trb = current_trb = &seg->trbs[0] +
				(segment_offset / sizeof(*current_trb));

	/* read cycle state of the last acked trb to find out CCS */
	ring->cycle_state = le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE;

	while (1) {
		/* last trb of the event ring: toggle cycle state */
		if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
			ring->cycle_state ^= 1;
			current_trb = &seg->trbs[0];
		} else {
			current_trb++;
		}

		/* cycle state transition */
		if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
		    ring->cycle_state)
			break;
	}

	if (erdp_trb != current_trb) {
		deq = xhci_trb_virt_to_dma(ring->deq_seg, current_trb);
		if (deq == 0)
			xhci_warn(xhci,
				"WARN invalid SW event ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		erdp_reg &= ERST_PTR_MASK;
		erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	erdp_reg |= ERST_EHB;
	xhci_write_64(xhci, erdp_reg, &ir_set->erst_dequeue);
}
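
/*
 * Free one secondary event ring and its ERST. Pending events are drained
 * via xhci_handle_sec_intr_events() before the ERST entries are released
 * with dma_free_coherent(). Nothing is freed if the root hub is not running.
 */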
static int sec_event_ring_cleanup(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst)
{
	int size;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!HCD_RH_RUNNING(xhci_to_hcd(xhci)))
		return 0;

	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
	if (erst->entries) {
		xhci_handle_sec_intr_events(xhci, ring, ir_set);
		dma_free_coherent(dev, size, erst->entries,
				erst->erst_dma_addr);
		erst->entries = NULL;
	}

	xhci_ring_free(xhci, ring);
	xhci_dbg(xhci, "Freed sec event ring");

	return 0;
}
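
/**
 * xhci_sec_event_ring_cleanup - tear down a secondary event ring
 * @udev: USB device that was passed to xhci_sec_event_ring_setup()
 * @ring: event ring returned by xhci_sec_event_ring_setup()
 *
 * Looks the ring up on the xhci_sec list, frees it under xhci->lock and
 * drops the book-keeping entry. Returns 0 whether or not the ring was found.
 */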
int xhci_sec_event_ring_cleanup(struct usb_device *udev, struct xhci_ring *ring)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_sec *sec;
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	list_for_each_entry(sec, &xhci_sec, list) {
		if (sec->event_ring == ring) {
			sec_event_ring_cleanup(xhci, ring, sec->ir_set,
					&sec->erst);
			list_del(&sec->list);
			kfree(sec);
			spin_unlock_irqrestore(&xhci->lock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}
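
/**
 * xhci_get_sec_event_ring_phys_addr - physical address of a secondary ring
 * @udev: USB device the ring was set up against
 * @ring: secondary event ring to look up
 * @dma: filled with the DMA address of the ring's first segment
 *
 * Presumably for clients that hand the ring to another memory master and
 * therefore need a physical rather than a DMA address. Returns 0 if the
 * device is gone, the controller is halted or the ring is unknown.
 */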
phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_device *udev,
		struct xhci_ring *ring, dma_addr_t *dma)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct device *dev = hcd->self.sysdev;
	struct sg_table sgt;
	phys_addr_t pa;
	struct xhci_sec *sec;

	if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd) ||
			(xhci->xhc_state & XHCI_STATE_HALTED))
		return 0;

	list_for_each_entry(sec, &xhci_sec, list) {
		if (sec->event_ring == ring) {
			dma_get_sgtable(dev, &sgt, ring->first_seg->trbs,
					ring->first_seg->dma, TRB_SEGMENT_SIZE);
			*dma = ring->first_seg->dma;
			pa = page_to_phys(sg_page(sgt.sgl));
			sg_free_table(&sgt);
			return pa;
		}
	}

	return 0;
}

/*
 * Returns 1 if the arguments are OK; returns 0 if this is a root hub;
 * returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}

	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and virt_dev does not match\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}
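
/**
 * xhci_get_xfer_ring_phys_addr - physical address of an endpoint transfer ring
 * @udev: USB device owning the endpoint
 * @ep: host endpoint whose transfer ring is queried
 * @dma: filled with the DMA address of the ring's first segment
 *
 * Same idea as xhci_get_sec_event_ring_phys_addr(), but for the transfer
 * ring of a specific endpoint. Returns 0 on any validation failure.
 */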
phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_device *udev,
		struct usb_host_endpoint *ep, dma_addr_t *dma)
{
	int ret;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct device *dev = hcd->self.sysdev;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct sg_table sgt;
	phys_addr_t pa;

	if (udev->state == USB_STATE_NOTATTACHED || !HCD_RH_RUNNING(hcd))
		return 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		xhci_err(xhci, "%s: invalid args\n", __func__);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	ep_index = xhci_get_endpoint_index(&ep->desc);
	if (virt_dev->eps[ep_index].ring &&
			virt_dev->eps[ep_index].ring->first_seg) {
		dma_get_sgtable(dev, &sgt,
				virt_dev->eps[ep_index].ring->first_seg->trbs,
				virt_dev->eps[ep_index].ring->first_seg->dma,
				TRB_SEGMENT_SIZE);
		*dma = virt_dev->eps[ep_index].ring->first_seg->dma;
		pa = page_to_phys(sg_page(sgt.sgl));
		sg_free_table(&sgt);
		return pa;
	}

	return 0;
}

/* Place a stop endpoint command on the command ring, ring the host controller
 * doorbell and wait for the command to complete.
 */
int xhci_stop_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *cmd;
	unsigned long flags;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;

	cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
	if (!cmd)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
	if (!virt_dev) {
		ret = -ENODEV;
		goto err;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	if (virt_dev->eps[ep_index].ring &&
			virt_dev->eps[ep_index].ring->dequeue) {
		ret = xhci_queue_stop_endpoint(xhci, cmd, udev->slot_id,
				ep_index, 0);
		if (ret)
			goto err;

		xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);

		/* Wait for the stop endpoint command to finish */
		wait_for_completion(cmd->completion);

		if (cmd->status == COMP_COMMAND_ABORTED ||
				cmd->status == COMP_STOPPED) {
			xhci_warn(xhci,
				"stop endpoint command timeout for ep%d%s\n",
				usb_endpoint_num(&ep->desc),
				usb_endpoint_dir_in(&ep->desc) ? "in" : "out");
			ret = -ETIME;
		}
		goto free_cmd;
	}

err:
	spin_unlock_irqrestore(&xhci->lock, flags);
free_cmd:
	xhci_free_command(xhci, cmd);

	return ret;
}
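
/*
 * Example (hypothetical, not part of this file): a client driver offloading
 * an endpoint could use these APIs roughly as follows. The interrupter
 * number, the udev/ep variables and the error handling are illustrative
 * only; they are not defined by this file.
 *
 *	struct xhci_ring *ring;
 *	dma_addr_t dma;
 *	phys_addr_t pa;
 *
 *	ring = xhci_sec_event_ring_setup(udev, 1);
 *	if (IS_ERR(ring))
 *		return PTR_ERR(ring);
 *
 *	pa = xhci_get_sec_event_ring_phys_addr(udev, ring, &dma);
 *	if (!pa)
 *		goto cleanup;
 *
 *	(hand pa/dma to the offload entity and run the stream)
 *
 *	xhci_stop_endpoint(udev, ep);
 * cleanup:
 *	xhci_sec_event_ring_cleanup(udev, ring);
 */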