cdnsp-gadget.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Cadence CDNSP DRD Driver.
  4. *
  5. * Copyright (C) 2020 Cadence.
  6. *
  7. * Author: Pawel Laszczak <[email protected]>
  8. *
  9. */
  10. #include <linux/moduleparam.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/module.h>
  13. #include <linux/iopoll.h>
  14. #include <linux/delay.h>
  15. #include <linux/log2.h>
  16. #include <linux/slab.h>
  17. #include <linux/pci.h>
  18. #include <linux/irq.h>
  19. #include <linux/dmi.h>
  20. #include "core.h"
  21. #include "gadget-export.h"
  22. #include "drd.h"
  23. #include "cdnsp-gadget.h"
  24. #include "cdnsp-trace.h"
  25. unsigned int cdnsp_port_speed(unsigned int port_status)
  26. {
  27. /*Detect gadget speed based on PORTSC register*/
  28. if (DEV_SUPERSPEEDPLUS(port_status))
  29. return USB_SPEED_SUPER_PLUS;
  30. else if (DEV_SUPERSPEED(port_status))
  31. return USB_SPEED_SUPER;
  32. else if (DEV_HIGHSPEED(port_status))
  33. return USB_SPEED_HIGH;
  34. else if (DEV_FULLSPEED(port_status))
  35. return USB_SPEED_FULL;
  36. /* If device is detached then speed will be USB_SPEED_UNKNOWN.*/
  37. return USB_SPEED_UNKNOWN;
  38. }
  39. /*
  40. * Given a port state, this function returns a value that would result in the
  41. * port being in the same state, if the value was written to the port status
  42. * control register.
  43. * Save Read Only (RO) bits and save read/write bits where
  44. * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
  45. * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
  46. */
  47. u32 cdnsp_port_state_to_neutral(u32 state)
  48. {
  49. /* Save read-only status and port state. */
  50. return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
  51. }
  52. /**
  53. * cdnsp_find_next_ext_cap - Find the offset of the extended capabilities
  54. * with capability ID id.
  55. * @base: PCI MMIO registers base address.
  56. * @start: Address at which to start looking, (0 or HCC_PARAMS to start at
  57. * beginning of list)
  58. * @id: Extended capability ID to search for.
  59. *
  60. * Returns the offset of the next matching extended capability structure.
  61. * Some capabilities can occur several times,
  62. * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
  63. */
  64. int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
  65. {
  66. u32 offset = start;
  67. u32 next;
  68. u32 val;
  69. if (!start || start == HCC_PARAMS_OFFSET) {
  70. val = readl(base + HCC_PARAMS_OFFSET);
  71. if (val == ~0)
  72. return 0;
  73. offset = HCC_EXT_CAPS(val) << 2;
  74. if (!offset)
  75. return 0;
  76. }
  77. do {
  78. val = readl(base + offset);
  79. if (val == ~0)
  80. return 0;
  81. if (EXT_CAPS_ID(val) == id && offset != start)
  82. return offset;
  83. next = EXT_CAPS_NEXT(val);
  84. offset += next << 2;
  85. } while (next);
  86. return 0;
  87. }
  88. void cdnsp_set_link_state(struct cdnsp_device *pdev,
  89. __le32 __iomem *port_regs,
  90. u32 link_state)
  91. {
  92. int port_num = 0xFF;
  93. u32 temp;
  94. temp = readl(port_regs);
  95. temp = cdnsp_port_state_to_neutral(temp);
  96. temp |= PORT_WKCONN_E | PORT_WKDISC_E;
  97. writel(temp, port_regs);
  98. temp &= ~PORT_PLS_MASK;
  99. temp |= PORT_LINK_STROBE | link_state;
  100. if (pdev->active_port)
  101. port_num = pdev->active_port->port_num;
  102. trace_cdnsp_handle_port_status(port_num, readl(port_regs));
  103. writel(temp, port_regs);
  104. trace_cdnsp_link_state_changed(port_num, readl(port_regs));
  105. }
  106. static void cdnsp_disable_port(struct cdnsp_device *pdev,
  107. __le32 __iomem *port_regs)
  108. {
  109. u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
  110. writel(temp | PORT_PED, port_regs);
  111. }
  112. static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
  113. __le32 __iomem *port_regs)
  114. {
  115. u32 portsc = readl(port_regs);
  116. writel(cdnsp_port_state_to_neutral(portsc) |
  117. (portsc & PORT_CHANGE_BITS), port_regs);
  118. }
  119. static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
  120. {
  121. __le32 __iomem *reg;
  122. void __iomem *base;
  123. u32 offset = 0;
  124. base = &pdev->cap_regs->hc_capbase;
  125. offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
  126. reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
  127. bit = readl(reg) | bit;
  128. writel(bit, reg);
  129. }
  130. static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
  131. {
  132. __le32 __iomem *reg;
  133. void __iomem *base;
  134. u32 offset = 0;
  135. base = &pdev->cap_regs->hc_capbase;
  136. offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
  137. reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
  138. bit = readl(reg) & ~bit;
  139. writel(bit, reg);
  140. }
  141. /*
  142. * Disable interrupts and begin the controller halting process.
  143. */
  144. static void cdnsp_quiesce(struct cdnsp_device *pdev)
  145. {
  146. u32 halted;
  147. u32 mask;
  148. u32 cmd;
  149. mask = ~(u32)(CDNSP_IRQS);
  150. halted = readl(&pdev->op_regs->status) & STS_HALT;
  151. if (!halted)
  152. mask &= ~(CMD_R_S | CMD_DEVEN);
  153. cmd = readl(&pdev->op_regs->command);
  154. cmd &= mask;
  155. writel(cmd, &pdev->op_regs->command);
  156. }
  157. /*
  158. * Force controller into halt state.
  159. *
  160. * Disable any IRQs and clear the run/stop bit.
  161. * Controller will complete any current and actively pipelined transactions, and
  162. * should halt within 16 ms of the run/stop bit being cleared.
  163. * Read controller Halted bit in the status register to see when the
  164. * controller is finished.
  165. */
int cdnsp_halt(struct cdnsp_device *pdev)
{
	int ret;
	u32 val;

	cdnsp_quiesce(pdev);

	/* Poll until the controller reports Halted, up to CDNSP_MAX_HALT_USEC. */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
					val & STS_HALT, 1,
					CDNSP_MAX_HALT_USEC);
	if (ret) {
		dev_err(pdev->dev, "ERROR: Device halt failed\n");
		return ret;
	}

	/* Record that the controller is now halted. */
	pdev->cdnsp_state |= CDNSP_STATE_HALTED;

	return 0;
}
  181. /*
  182. * device controller died, register read returns 0xffffffff, or command never
  183. * ends.
  184. */
void cdnsp_died(struct cdnsp_device *pdev)
{
	dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
	/* Mark the device as dying and attempt a best-effort halt. */
	pdev->cdnsp_state |= CDNSP_STATE_DYING;
	cdnsp_halt(pdev);
}
  191. /*
  192. * Set the run bit and wait for the device to be running.
  193. */
  194. static int cdnsp_start(struct cdnsp_device *pdev)
  195. {
  196. u32 temp;
  197. int ret;
  198. temp = readl(&pdev->op_regs->command);
  199. temp |= (CMD_R_S | CMD_DEVEN);
  200. writel(temp, &pdev->op_regs->command);
  201. pdev->cdnsp_state = 0;
  202. /*
  203. * Wait for the STS_HALT Status bit to be 0 to indicate the device is
  204. * running.
  205. */
  206. ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
  207. !(temp & STS_HALT), 1,
  208. CDNSP_MAX_HALT_USEC);
  209. if (ret) {
  210. pdev->cdnsp_state = CDNSP_STATE_DYING;
  211. dev_err(pdev->dev, "ERROR: Controller run failed\n");
  212. }
  213. return ret;
  214. }
  215. /*
  216. * Reset a halted controller.
  217. *
  218. * This resets pipelines, timers, counters, state machines, etc.
  219. * Transactions will be terminated immediately, and operational registers
  220. * will be set to their defaults.
  221. */
  222. int cdnsp_reset(struct cdnsp_device *pdev)
  223. {
  224. u32 command;
  225. u32 temp;
  226. int ret;
  227. temp = readl(&pdev->op_regs->status);
  228. if (temp == ~(u32)0) {
  229. dev_err(pdev->dev, "Device not accessible, reset failed.\n");
  230. return -ENODEV;
  231. }
  232. if ((temp & STS_HALT) == 0) {
  233. dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
  234. return -EINVAL;
  235. }
  236. command = readl(&pdev->op_regs->command);
  237. command |= CMD_RESET;
  238. writel(command, &pdev->op_regs->command);
  239. ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
  240. !(temp & CMD_RESET), 1,
  241. 10 * 1000);
  242. if (ret) {
  243. dev_err(pdev->dev, "ERROR: Controller reset failed\n");
  244. return ret;
  245. }
  246. /*
  247. * CDNSP cannot write any doorbells or operational registers other
  248. * than status until the "Controller Not Ready" flag is cleared.
  249. */
  250. ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
  251. !(temp & STS_CNR), 1,
  252. 10 * 1000);
  253. if (ret) {
  254. dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
  255. return ret;
  256. }
  257. dev_dbg(pdev->dev, "Controller ready to work");
  258. return ret;
  259. }
  260. /*
  261. * cdnsp_get_endpoint_index - Find the index for an endpoint given its
  262. * descriptor.Use the return value to right shift 1 for the bitmask.
  263. *
  264. * Index = (epnum * 2) + direction - 1,
  265. * where direction = 0 for OUT, 1 for IN.
  266. * For control endpoints, the IN index is used (OUT index is unused), so
  267. * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
  268. */
  269. static unsigned int
  270. cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
  271. {
  272. unsigned int index = (unsigned int)usb_endpoint_num(desc);
  273. if (usb_endpoint_xfer_control(desc))
  274. return index * 2;
  275. return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
  276. }
  277. /*
  278. * Find the flag for this endpoint (for use in the control context). Use the
  279. * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
  280. * bit 1, etc.
  281. */
  282. static unsigned int
  283. cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
  284. {
  285. return 1 << (cdnsp_get_endpoint_index(desc) + 1);
  286. }
  287. int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
  288. {
  289. struct cdnsp_device *pdev = pep->pdev;
  290. struct usb_request *request;
  291. int ret;
  292. if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
  293. trace_cdnsp_request_enqueue_busy(preq);
  294. return -EBUSY;
  295. }
  296. request = &preq->request;
  297. request->actual = 0;
  298. request->status = -EINPROGRESS;
  299. preq->direction = pep->direction;
  300. preq->epnum = pep->number;
  301. preq->td.drbl = 0;
  302. ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
  303. if (ret) {
  304. trace_cdnsp_request_enqueue_error(preq);
  305. return ret;
  306. }
  307. list_add_tail(&preq->list, &pep->pending_list);
  308. trace_cdnsp_request_enqueue(preq);
  309. switch (usb_endpoint_type(pep->endpoint.desc)) {
  310. case USB_ENDPOINT_XFER_CONTROL:
  311. ret = cdnsp_queue_ctrl_tx(pdev, preq);
  312. break;
  313. case USB_ENDPOINT_XFER_BULK:
  314. case USB_ENDPOINT_XFER_INT:
  315. ret = cdnsp_queue_bulk_tx(pdev, preq);
  316. break;
  317. case USB_ENDPOINT_XFER_ISOC:
  318. ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
  319. }
  320. if (ret)
  321. goto unmap;
  322. return 0;
  323. unmap:
  324. usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
  325. pep->direction);
  326. list_del(&preq->list);
  327. trace_cdnsp_request_enqueue_error(preq);
  328. return ret;
  329. }
  330. /*
  331. * Remove the request's TD from the endpoint ring. This may cause the
  332. * controller to stop USB transfers, potentially stopping in the middle of a
  333. * TRB buffer. The controller should pick up where it left off in the TD,
  334. * unless a Set Transfer Ring Dequeue Pointer is issued.
  335. *
  336. * The TRBs that make up the buffers for the canceled request will be "removed"
  337. * from the ring. Since the ring is a contiguous structure, they can't be
  338. * physically removed. Instead, there are two options:
  339. *
  340. * 1) If the controller is in the middle of processing the request to be
  341. * canceled, we simply move the ring's dequeue pointer past those TRBs
  342. * using the Set Transfer Ring Dequeue Pointer command. This will be
  343. * the common case, when drivers timeout on the last submitted request
  344. * and attempt to cancel.
  345. *
  346. * 2) If the controller is in the middle of a different TD, we turn the TRBs
  347. * into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
  348. * The controller will need to invalidate the any TRBs it has cached after
  349. * the stop endpoint command.
  350. *
  351. * 3) The TD may have completed by the time the Stop Endpoint Command
  352. * completes, so software needs to handle that case too.
  353. *
  354. */
int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	int ret_stop = 0;
	int ret_rem;

	trace_cdnsp_request_dequeue(preq);

	/* Stop the endpoint first if the controller is still running it. */
	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING)
		ret_stop = cdnsp_cmd_stop_ep(pdev, pep);

	ret_rem = cdnsp_remove_request(pdev, preq, pep);

	/* A removal error takes precedence over a stop error. */
	return ret_rem ? ret_rem : ret_stop;
}
/* Clear all stale state out of the input (device) context. */
static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	struct cdnsp_ep_ctx *ep_ctx;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/*
	 * When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched. Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);

	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

	/* Wipe every non-default endpoint context. */
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
		ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
/*
 * Issue a configure endpoint command and wait for it to finish.
 * Returns 0 on success, -EINVAL when the command completes with a
 * non-success completion code.
 */
static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret) {
		dev_err(pdev->dev,
			"ERR: unexpected command completion code 0x%x.\n", ret);
		return -EINVAL;
	}

	return ret;
}
  407. static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
  408. struct cdnsp_ep *pep)
  409. {
  410. struct cdnsp_segment *segment;
  411. union cdnsp_trb *event;
  412. u32 cycle_state;
  413. u32 data;
  414. event = pdev->event_ring->dequeue;
  415. segment = pdev->event_ring->deq_seg;
  416. cycle_state = pdev->event_ring->cycle_state;
  417. while (1) {
  418. data = le32_to_cpu(event->trans_event.flags);
  419. /* Check the owner of the TRB. */
  420. if ((data & TRB_CYCLE) != cycle_state)
  421. break;
  422. if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
  423. TRB_TO_EP_ID(data) == (pep->idx + 1)) {
  424. data |= TRB_EVENT_INVALIDATE;
  425. event->trans_event.flags = cpu_to_le32(data);
  426. }
  427. if (cdnsp_last_trb_on_seg(segment, event)) {
  428. cycle_state ^= 1;
  429. segment = pdev->event_ring->deq_seg->next;
  430. event = segment->trbs;
  431. } else {
  432. event++;
  433. }
  434. }
  435. }
/*
 * Wait for the most recently queued command to complete and translate its
 * completion event into a return code.
 *
 * Returns 0 on COMP_SUCCESS, -ETIMEDOUT when the command ring stays busy,
 * -EINVAL when the command TRB address cannot be resolved or the event ring
 * is inconsistent, or the negated completion code otherwise.
 */
int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *cmd_trb;
	dma_addr_t cmd_deq_dma;
	union cdnsp_trb *event;
	u32 cycle_state;
	int ret, val;
	u64 cmd_dma;
	u32 flags;

	cmd_trb = pdev->cmd.command_trb;
	pdev->cmd.status = 0;

	trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);

	/* Poll until the controller releases the command ring. */
	ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
					!CMD_RING_BUSY(val), 1,
					CDNSP_CMD_TIMEOUT);
	if (ret) {
		dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
		trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		return -ETIMEDOUT;
	}

	event = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	/* DMA address of the command TRB, used to match completion events. */
	cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
	if (!cmd_deq_dma)
		return -EINVAL;

	while (1) {
		flags = le32_to_cpu(event->event_cmd.flags);

		/* Check the owner of the TRB. */
		if ((flags & TRB_CYCLE) != cycle_state)
			return -EINVAL;

		cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);

		/*
		 * Check whether the completion event is for last queued
		 * command.
		 */
		if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
		    cmd_dma != (u64)cmd_deq_dma) {
			/* Not ours - advance within the current segment... */
			if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
				event++;
				continue;
			}

			/* ...or to the next segment, toggling cycle on wrap. */
			if (cdnsp_last_trb_on_ring(pdev->event_ring,
						   event_deq_seg, event))
				cycle_state ^= 1;

			event_deq_seg = event_deq_seg->next;
			event = event_deq_seg->trbs;
			continue;
		}

		trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);

		pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
		if (pdev->cmd.status == COMP_SUCCESS)
			return 0;

		return -pdev->cmd.status;
	}
}
/*
 * Set or clear the halt (stall) condition on an endpoint.
 * @value: non-zero to halt the endpoint, zero to clear the halt.
 *
 * Returns 0 on success or a negative error code from the underlying
 * stop/reset endpoint commands.
 */
int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
			struct cdnsp_ep *pep,
			int value)
{
	int ret;

	trace_cdnsp_ep_halt(value ? "Set" : "Clear");

	/* The endpoint has to be stopped before it can be halted or reset. */
	ret = cdnsp_cmd_stop_ep(pdev, pep);
	if (ret)
		return ret;

	if (value) {
		/* Issue Halt Endpoint only once the endpoint is Stopped. */
		if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
			cdnsp_queue_halt_endpoint(pdev, pep->idx);
			cdnsp_ring_cmd_db(pdev);
			ret = cdnsp_wait_for_cmd_compl(pdev);
		}

		pep->ep_state |= EP_HALTED;
	} else {
		/* Clearing a halt requires a Reset Endpoint command. */
		cdnsp_queue_reset_ep(pdev, pep->idx);
		cdnsp_ring_cmd_db(pdev);
		ret = cdnsp_wait_for_cmd_compl(pdev);

		trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);

		if (ret)
			return ret;

		pep->ep_state &= ~EP_HALTED;

		/* Kick queued transfers unless the endpoint stays wedged. */
		if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
			cdnsp_ring_doorbell_for_active_rings(pdev, pep);

		pep->ep_state &= ~EP_WEDGE;
	}

	return 0;
}
/*
 * Push the pending add/drop flags in the input context to the controller
 * via a configure endpoint command, then zero the input context.
 */
static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
					  struct cdnsp_ep *pep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int ret = 0;
	u32 ep_sts;
	int i;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/* Don't issue the command if there's no endpoints to update. */
	if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
		return 0;

	/* The slot flag is always added; EP0 is never added or dropped. */
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
		__le32 le32 = cpu_to_le32(BIT(i));

		/* Highest endpoint that stays (or becomes) configured. */
		if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
		    (ctrl_ctx->add_flags & le32) || i == 1) {
			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
			break;
		}
	}

	ep_sts = GET_EP_CTX_STATE(pep->out_ctx);

	/* Issue the command only when it would actually change the state. */
	if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
	     ep_sts == EP_STATE_DISABLED) ||
	    (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
		ret = cdnsp_configure_endpoint(pdev);

	trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
	trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);

	cdnsp_zero_in_ctx(pdev);

	return ret;
}
  560. /*
  561. * This submits a Reset Device Command, which will set the device state to 0,
  562. * set the device address to 0, and disable all the endpoints except the default
  563. * control endpoint. The USB core should come back and call
  564. * cdnsp_setup_device(), and then re-set up the configuration.
  565. */
int cdnsp_reset_device(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret, i;

	/* Wipe the input slot context and forget the assigned address. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	slot_ctx->dev_info = 0;
	pdev->device_address = 0;

	/* If device is not setup, there is no point in resetting it. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
	trace_cdnsp_reset_device(slot_ctx);

	/* Clear a pending EP0 halt before resetting while still in Default. */
	if (slot_state <= SLOT_STATE_DEFAULT &&
	    pdev->eps[0].ep_state & EP_HALTED) {
		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
	}

	/*
	 * During Reset Device command controller shall transition the
	 * endpoint ep0 to the Running State.
	 */
	pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
	pdev->eps[0].ep_state |= EP_ENABLED;

	if (slot_state <= SLOT_STATE_DEFAULT)
		return 0;

	cdnsp_queue_reset_device(pdev);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	/*
	 * After Reset Device command all not default endpoints
	 * are in Disabled state.
	 */
	for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
		pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;

	trace_cdnsp_handle_cmd_reset_dev(slot_ctx);

	if (ret)
		dev_err(pdev->dev, "Reset device failed with error code %d",
			ret);

	return ret;
}
  605. /*
  606. * Sets the MaxPStreams field and the Linear Stream Array field.
  607. * Sets the dequeue pointer to the stream context array.
  608. */
static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
					     struct cdnsp_ep_ctx *ep_ctx,
					     struct cdnsp_stream_info *stream_info)
{
	u32 max_primary_streams;

	/*
	 * MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	/* Linear Stream Array is always enabled for stream endpoints here. */
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	/* Point the endpoint dequeue at the stream context array. */
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
  624. /*
  625. * The drivers use this function to prepare a bulk endpoints to use streams.
  626. *
  627. * Don't allow the call to succeed if endpoint only supports one stream
  628. * (which means it doesn't support streams at all).
  629. */
/*
 * Allocate stream contexts for a bulk endpoint and enable streams in its
 * input context. Returns the number of usable streams (excluding stream 0),
 * 0 when the endpoint does not support streams, or a negative error code.
 */
int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
	unsigned int num_stream_ctxs;
	int ret;

	/* Endpoint advertises no streams at all. */
	if (num_streams == 0)
		return 0;

	if (num_streams > STREAM_NUM_STREAMS)
		return -EINVAL;

	/*
	 * Add two to the number of streams requested to account for
	 * stream 0 that is reserved for controller usage and one additional
	 * for TASK SET FULL response.
	 */
	num_streams += 2;

	/* The stream context array size must be a power of two */
	num_stream_ctxs = roundup_pow_of_two(num_streams);

	trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);

	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
	if (ret)
		return ret;

	cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);

	pep->ep_state |= EP_HAS_STREAMS;
	pep->stream_info.td_count = 0;
	pep->stream_info.first_prime_det = 0;

	/* Subtract 1 for stream 0, which drivers can't use. */
	return num_streams - 1;
}
/* Issue a Disable Slot command and reset the cached device state. */
int cdnsp_disable_slot(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	pdev->slot_id = 0;
	pdev->active_port = NULL;

	trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	/* The slot is gone - start over with clean device contexts. */
	memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
	memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);

	return ret;
}
/*
 * Issue an Enable Slot command unless the slot is already out of the
 * DISABLED state. On success the controller uses slot id 1.
 */
int cdnsp_enable_slot(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret;

	/* If device is not setup, there is no point in resetting it. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
	if (slot_state != SLOT_STATE_DISABLED)
		return 0;

	cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret)
		goto show_trace;

	/* This controller exposes a single device slot. */
	pdev->slot_id = 1;

show_trace:
	trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
	return ret;
}
/*
 * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY
 * or with BSR = 1 if setup is SETUP_CONTEXT_ADDRESS.
 */
int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int dev_state = 0;
	int ret;

	/* A valid slot id is required before addressing the device. */
	if (!pdev->slot_id) {
		trace_cdnsp_slot_id("incorrect");
		return -EINVAL;
	}

	if (!pdev->active_port->port_num)
		return -EINVAL;

	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	/* Nothing to do if the slot already reached the DEFAULT state. */
	if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
		trace_cdnsp_slot_already_in_default(slot_ctx);
		return 0;
	}

	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	/* (Re)build the input slot/ep0 contexts when not yet populated. */
	if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
		ret = cdnsp_setup_addressable_priv_dev(pdev);
		if (ret)
			return ret;
	}

	cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);

	/* Only the slot and ep0 contexts are evaluated by this command. */
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	trace_cdnsp_setup_device_slot(slot_ctx);

	cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));

	/* Zero the input context control for later use. */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	return ret;
}
/*
 * Enable/disable USB2 hardware LPM (L1) handling on the USB2 port.
 * No-op unless the USB2 port is active and the gadget is LPM capable.
 * NOTE(review): @req is currently unused; kept for the caller's signature.
 */
void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
				 struct usb_request *req,
				 int enable)
{
	if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
		return;

	trace_cdnsp_lpm(enable);

	if (enable)
		/* Program default BESL, NYET on L1 entry, and hardware LPM enable. */
		writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
		       &pdev->active_port->regs->portpmsc);
	else
		writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
}
  746. static int cdnsp_get_frame(struct cdnsp_device *pdev)
  747. {
  748. return readl(&pdev->run_regs->microframe_index) >> 3;
  749. }
/*
 * usb_ep_ops.enable callback: configure an endpoint from its descriptor,
 * allocate its rings, and issue a Configure Endpoint command.
 */
static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 added_ctxs;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    !desc->wMaxPacketSize)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;
	/* Re-enabling always leaves the "unconfigured by reset" state. */
	pep->ep_state &= ~EP_UNCONFIGURED;

	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
			  "%s is already enabled\n", pep->name))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);

	added_ctxs = cdnsp_get_endpoint_flag(desc);
	/* Slot and ep0 contexts are owned by the driver, never user-enabled. */
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
		ret = -EINVAL;
		goto unlock;
	}

	/* Descriptor bInterval is log2-encoded for HS/SS endpoints. */
	pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	if (pdev->gadget.speed == USB_SPEED_FULL) {
		/* FS intervals are in frames; scale to microframes (x8). */
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
			pep->interval = desc->bInterval << 3;
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
			pep->interval = BIT(desc->bInterval - 1) << 3;
	}

	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
		/* Reject ISOC intervals above 4096 microframes. */
		if (pep->interval > BIT(12)) {
			dev_err(pdev->dev, "bInterval %d not supported\n",
				desc->bInterval);
			ret = -EINVAL;
			goto unlock;
		}
		cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	}

	ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
	if (ret)
		goto unlock;

	/* Tell the controller which context is being added. */
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
	ctrl_ctx->drop_flags = 0;

	ret = cdnsp_update_eps_configuration(pdev, pep);
	if (ret) {
		cdnsp_free_endpoint_rings(pdev, pep);
		goto unlock;
	}

	pep->ep_state |= EP_ENABLED;
	pep->ep_state &= ~EP_STOPPED;

unlock:
	trace_cdnsp_ep_enable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}
/*
 * usb_ep_ops.disable callback: stop the endpoint, fail all queued
 * requests, drop its context, and free its transfer rings.
 */
static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_request *preq;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 drop_flag;
	int ret = 0;

	if (!ep)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	spin_lock_irqsave(&pdev->lock, flags);

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s is already disabled\n", pep->name);
		ret = -EINVAL;
		goto finish;
	}

	pep->ep_state |= EP_DIS_IN_RROGRESS;

	/* Endpoint was unconfigured by Reset Device command. */
	if (!(pep->ep_state & EP_UNCONFIGURED)) {
		cdnsp_cmd_stop_ep(pdev, pep);
		cdnsp_cmd_flush_ep(pdev, pep);
	}

	/* Remove all queued USB requests. */
	while (!list_empty(&pep->pending_list)) {
		preq = next_request(&pep->pending_list);
		cdnsp_ep_dequeue(pep, preq);
	}

	/* Discard any events already queued for this endpoint. */
	cdnsp_invalidate_ep_events(pdev, pep);

	pep->ep_state &= ~EP_DIS_IN_RROGRESS;

	/* Mark this endpoint's context as dropped in the input context. */
	drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
	ctrl_ctx->add_flags = 0;

	cdnsp_endpoint_zero(pdev, pep);

	/* No command needed if the reset already unconfigured the endpoint. */
	if (!(pep->ep_state & EP_UNCONFIGURED))
		ret = cdnsp_update_eps_configuration(pdev, pep);

	cdnsp_free_endpoint_rings(pdev, pep);

	pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
	pep->ep_state |= EP_STOPPED;

finish:
	trace_cdnsp_ep_disable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}
  856. static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
  857. gfp_t gfp_flags)
  858. {
  859. struct cdnsp_ep *pep = to_cdnsp_ep(ep);
  860. struct cdnsp_request *preq;
  861. preq = kzalloc(sizeof(*preq), gfp_flags);
  862. if (!preq)
  863. return NULL;
  864. preq->epnum = pep->number;
  865. preq->pep = pep;
  866. trace_cdnsp_alloc_request(preq);
  867. return &preq->request;
  868. }
  869. static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
  870. struct usb_request *request)
  871. {
  872. struct cdnsp_request *preq = to_cdnsp_request(request);
  873. trace_cdnsp_free_request(preq);
  874. kfree(preq);
  875. }
  876. static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
  877. struct usb_request *request,
  878. gfp_t gfp_flags)
  879. {
  880. struct cdnsp_request *preq;
  881. struct cdnsp_device *pdev;
  882. struct cdnsp_ep *pep;
  883. unsigned long flags;
  884. int ret;
  885. if (!request || !ep)
  886. return -EINVAL;
  887. pep = to_cdnsp_ep(ep);
  888. pdev = pep->pdev;
  889. if (!(pep->ep_state & EP_ENABLED)) {
  890. dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
  891. pep->name);
  892. return -EINVAL;
  893. }
  894. preq = to_cdnsp_request(request);
  895. spin_lock_irqsave(&pdev->lock, flags);
  896. ret = cdnsp_ep_enqueue(pep, preq);
  897. spin_unlock_irqrestore(&pdev->lock, flags);
  898. return ret;
  899. }
/* usb_ep_ops.dequeue: cancel a request that is still in progress. */
static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
				   struct usb_request *request)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags;
	int ret;

	/* Request already completed or was never queued. */
	if (request->status != -EINPROGRESS)
		return 0;

	if (!pep->endpoint.desc) {
		dev_err(pdev->dev,
			"%s: can't dequeue to disabled endpoint\n",
			pep->name);
		return -ESHUTDOWN;
	}

	/* Requests have already been dequeued while disabling the endpoint. */
	if (!(pep->ep_state & EP_ENABLED))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}
  923. static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
  924. {
  925. struct cdnsp_ep *pep = to_cdnsp_ep(ep);
  926. struct cdnsp_device *pdev = pep->pdev;
  927. struct cdnsp_request *preq;
  928. unsigned long flags;
  929. int ret;
  930. spin_lock_irqsave(&pdev->lock, flags);
  931. preq = next_request(&pep->pending_list);
  932. if (value) {
  933. if (preq) {
  934. trace_cdnsp_ep_busy_try_halt_again(pep, 0);
  935. ret = -EAGAIN;
  936. goto done;
  937. }
  938. }
  939. ret = cdnsp_halt_endpoint(pdev, pep, value);
  940. done:
  941. spin_unlock_irqrestore(&pdev->lock, flags);
  942. return ret;
  943. }
  944. static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
  945. {
  946. struct cdnsp_ep *pep = to_cdnsp_ep(ep);
  947. struct cdnsp_device *pdev = pep->pdev;
  948. unsigned long flags;
  949. int ret;
  950. spin_lock_irqsave(&pdev->lock, flags);
  951. pep->ep_state |= EP_WEDGE;
  952. ret = cdnsp_halt_endpoint(pdev, pep, 1);
  953. spin_unlock_irqrestore(&pdev->lock, flags);
  954. return ret;
  955. }
/* Endpoint operations for the bidirectional control endpoint (ep0). */
static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
	.enable		= cdnsp_gadget_ep_enable,
	.disable	= cdnsp_gadget_ep_disable,
	.alloc_request	= cdnsp_gadget_ep_alloc_request,
	.free_request	= cdnsp_gadget_ep_free_request,
	.queue		= cdnsp_gadget_ep_queue,
	.dequeue	= cdnsp_gadget_ep_dequeue,
	.set_halt	= cdnsp_gadget_ep_set_halt,
	.set_wedge	= cdnsp_gadget_ep_set_wedge,
};
/* Endpoint operations for all non-control endpoints. */
static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
	.enable		= cdnsp_gadget_ep_enable,
	.disable	= cdnsp_gadget_ep_disable,
	.alloc_request	= cdnsp_gadget_ep_alloc_request,
	.free_request	= cdnsp_gadget_ep_free_request,
	.queue		= cdnsp_gadget_ep_queue,
	.dequeue	= cdnsp_gadget_ep_dequeue,
	.set_halt	= cdnsp_gadget_ep_set_halt,
	.set_wedge	= cdnsp_gadget_ep_set_wedge,
};
/*
 * Complete a request back to the gadget driver. Called with pdev->lock
 * held; the lock is dropped around the completion callback (which may
 * re-queue) and re-acquired afterwards.
 */
void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
			   struct cdnsp_request *preq,
			   int status)
{
	struct cdnsp_device *pdev = pep->pdev;

	list_del(&preq->list);

	/* Don't overwrite a status already set (e.g. by dequeue). */
	if (preq->request.status == -EINPROGRESS)
		preq->request.status = status;

	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					preq->direction);

	trace_cdnsp_request_giveback(preq);

	/* The internal ep0 request is never given back to the driver. */
	if (preq != &pdev->ep0_preq) {
		spin_unlock(&pdev->lock);
		usb_gadget_giveback_request(&pep->endpoint, &preq->request);
		spin_lock(&pdev->lock);
	}
}
/* Template ep0 descriptor; wMaxPacketSize is set per connection speed. */
static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
};
/*
 * Bring the controller up for the requested maximum speed: program
 * interrupt moderation, port speed configuration and link states, then
 * start the controller and unmask its interrupts.
 * Called with pdev->lock held.
 */
static int cdnsp_run(struct cdnsp_device *pdev,
		     enum usb_device_speed speed)
{
	u32 fs_speed = 0;
	u32 temp;
	int ret;

	/* Program interrupt moderation (IMOD ticks are 250 ns each). */
	temp = readl(&pdev->ir_set->irq_control);
	temp &= ~IMOD_INTERVAL_MASK;
	temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
	writel(temp, &pdev->ir_set->irq_control);

	temp = readl(&pdev->port3x_regs->mode_addr);

	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		temp |= CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_SUPER:
		temp &= ~CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_FULL:
		/* Force the USB2 port to full speed only. */
		fs_speed = PORT_REG6_FORCE_FS;
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to superspeed. */
		speed = USB_SPEED_SUPER;
		break;
	}

	if (speed >= USB_SPEED_SUPER) {
		writel(temp, &pdev->port3x_regs->mode_addr);
		cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
				     XDEV_RXDETECT);
	} else {
		/* USB2-only operation: keep the USB3 port disabled. */
		cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	}

	cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
			     XDEV_RXDETECT);

	cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);

	ret = cdnsp_start(pdev);
	if (ret) {
		ret = -ENODEV;
		goto err;
	}

	/* Enable controller interrupts ... */
	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_INTE);
	writel(temp, &pdev->op_regs->command);

	/* ... and the event interrupter. */
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);

	trace_cdnsp_init("Controller ready to work");
	return 0;
err:
	cdnsp_halt(pdev);
	return ret;
}
  1057. static int cdnsp_gadget_udc_start(struct usb_gadget *g,
  1058. struct usb_gadget_driver *driver)
  1059. {
  1060. enum usb_device_speed max_speed = driver->max_speed;
  1061. struct cdnsp_device *pdev = gadget_to_cdnsp(g);
  1062. unsigned long flags;
  1063. int ret;
  1064. spin_lock_irqsave(&pdev->lock, flags);
  1065. pdev->gadget_driver = driver;
  1066. /* limit speed if necessary */
  1067. max_speed = min(driver->max_speed, g->max_speed);
  1068. ret = cdnsp_run(pdev, max_speed);
  1069. spin_unlock_irqrestore(&pdev->lock, flags);
  1070. return ret;
  1071. }
/*
 * Update Event Ring Dequeue Pointer:
 * - When all events have finished
 * - To avoid "Event Ring Full Error" condition
 */
void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
			       union cdnsp_trb *event_ring_deq,
			       u8 clear_ehb)
{
	u64 temp_64;
	dma_addr_t deq;

	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);

	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != pdev->event_ring->dequeue) {
		deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
					    pdev->event_ring->dequeue);
		/* Keep the register's low control bits, replace the pointer. */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C). */
	if (clear_ehb)
		temp_64 |= ERST_EHB;
	else
		temp_64 &= ~ERST_EHB;

	cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
}
/*
 * Reset the command ring to its initial state: zero every TRB except
 * each segment's trailing link TRB, then reprogram the Command Ring
 * Control register with the first segment's DMA address.
 */
static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *seg;
	u64 val_64;
	int i;

	cdnsp_initialize_ring_info(pdev->cmd_ring);

	seg = pdev->cmd_ring->first_seg;
	for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
		/* Preserve the last TRB of each segment (the link TRB). */
		memset(seg->trbs, 0,
		       sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
		seg = seg->next;
	}

	/* Set the address in the Command Ring Control register. */
	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
		 pdev->cmd_ring->cycle_state;
	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}
/*
 * Walk the event ring and consume (discard) every event the controller
 * has posted, then write back the final dequeue pointer and clear the
 * event handler busy flag.
 */
static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *event_ring_deq;
	union cdnsp_trb *event;
	u32 cycle_bit;

	event_ring_deq = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	event = pdev->event_ring->dequeue;

	/* Update ring dequeue pointer. */
	while (1) {
		cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);

		/* Does the controller or driver own the TRB? */
		if (cycle_bit != pdev->event_ring->cycle_state)
			break;

		cdnsp_inc_deq(pdev, pdev->event_ring);

		if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
			event++;
			continue;
		}

		/* Wrapping the whole ring toggles the expected cycle bit. */
		if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
					   event))
			cycle_bit ^= 1;

		event_deq_seg = event_deq_seg->next;
		event = event_deq_seg->trbs;
	}

	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
}
/*
 * Quiesce the controller: flush ep0, disable both ports and the slot,
 * halt the controller, mask and acknowledge interrupts, drain the event
 * ring and reset the command ring. Called with pdev->lock held.
 */
static void cdnsp_stop(struct cdnsp_device *pdev)
{
	u32 temp;

	cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);

	/* Remove internally queued request for ep0. */
	if (!list_empty(&pdev->eps[0].pending_list)) {
		struct cdnsp_request *req;

		req = next_request(&pdev->eps[0].pending_list);
		if (req == &pdev->ep0_preq)
			cdnsp_ep_dequeue(&pdev->eps[0], req);
	}

	cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	cdnsp_disable_slot(pdev);
	cdnsp_halt(pdev);

	/* Acknowledge the event interrupt (STS_EINT is RW1C). */
	temp = readl(&pdev->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);

	/* Mask the interrupter. */
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);

	cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);

	/* Clear interrupt line */
	temp = readl(&pdev->ir_set->irq_pending);
	temp |= IMAN_IP;
	writel(temp, &pdev->ir_set->irq_pending);

	cdnsp_consume_all_events(pdev);
	cdnsp_clear_cmd_ring(pdev);

	trace_cdnsp_exit("Controller stopped.");
}
  1174. /*
  1175. * Stop controller.
  1176. * This function is called by the gadget core when the driver is removed.
  1177. * Disable slot, disable IRQs, and quiesce the controller.
  1178. */
  1179. static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
  1180. {
  1181. struct cdnsp_device *pdev = gadget_to_cdnsp(g);
  1182. unsigned long flags;
  1183. spin_lock_irqsave(&pdev->lock, flags);
  1184. cdnsp_stop(pdev);
  1185. pdev->gadget_driver = NULL;
  1186. spin_unlock_irqrestore(&pdev->lock, flags);
  1187. return 0;
  1188. }
  1189. static int cdnsp_gadget_get_frame(struct usb_gadget *g)
  1190. {
  1191. struct cdnsp_device *pdev = gadget_to_cdnsp(g);
  1192. return cdnsp_get_frame(pdev);
  1193. }
/*
 * Initiate remote wakeup by driving the active port's link to U0.
 * Called with pdev->lock held. Bails out when the host has not enabled
 * remote wakeup (USB2 RWE in U2, or may_wakeup not set while in U3).
 */
static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;
	u32 portpm, portsc;

	port_regs = pdev->active_port->regs;
	portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;

	/* Remote wakeup feature is not enabled by host. */
	if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
		portpm = readl(&port_regs->portpmsc);

		if (!(portpm & PORT_RWE))
			return;
	}

	if (portsc == XDEV_U3 && !pdev->may_wakeup)
		return;

	cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);

	pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
}
  1211. static int cdnsp_gadget_wakeup(struct usb_gadget *g)
  1212. {
  1213. struct cdnsp_device *pdev = gadget_to_cdnsp(g);
  1214. unsigned long flags;
  1215. spin_lock_irqsave(&pdev->lock, flags);
  1216. __cdnsp_gadget_wakeup(pdev);
  1217. spin_unlock_irqrestore(&pdev->lock, flags);
  1218. return 0;
  1219. }
  1220. static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
  1221. int is_selfpowered)
  1222. {
  1223. struct cdnsp_device *pdev = gadget_to_cdnsp(g);
  1224. unsigned long flags;
  1225. spin_lock_irqsave(&pdev->lock, flags);
  1226. g->is_selfpowered = !!is_selfpowered;
  1227. spin_unlock_irqrestore(&pdev->lock, flags);
  1228. return 0;
  1229. }
/*
 * usb_gadget_ops.pullup: connect/disconnect from the bus by asserting
 * or clearing VBUS sensing; the device is reset before disconnecting.
 */
static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
	struct cdns *cdns = dev_get_drvdata(pdev->dev);
	unsigned long flags;

	trace_cdnsp_pullup(is_on);

	/*
	 * Disable events handling while controller is being
	 * enabled/disabled.
	 */
	disable_irq(cdns->dev_irq);
	spin_lock_irqsave(&pdev->lock, flags);

	if (!is_on) {
		cdnsp_reset_device(pdev);
		cdns_clear_vbus(cdns);
	} else {
		cdns_set_vbus(cdns);
	}

	spin_unlock_irqrestore(&pdev->lock, flags);
	enable_irq(cdns->dev_irq);

	return 0;
}
/* Gadget-level operations exposed to the UDC core. */
static const struct usb_gadget_ops cdnsp_gadget_ops = {
	.get_frame		= cdnsp_gadget_get_frame,
	.wakeup			= cdnsp_gadget_wakeup,
	.set_selfpowered	= cdnsp_gadget_set_selfpowered,
	.pullup			= cdnsp_gadget_pullup,
	.udc_start		= cdnsp_gadget_udc_start,
	.udc_stop		= cdnsp_gadget_udc_stop,
};
/*
 * Read the endpoint's on-chip buffering capability from the XBUF
 * extended capability registers and cache it in @pep.
 */
static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;
	int endpoints;

	reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);

	if (!pep->direction) {
		/* OUT endpoint: buffering comes from the RX tag mask pair. */
		pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
		pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
		pep->buffering = (pep->buffering + 1) / 2;
		pep->buffering_period = (pep->buffering_period + 1) / 2;
		return;
	}

	endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;

	/* Set to XBUF_TX_TAG_MASK_0 register. */
	reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);

	/* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. */
	reg += pep->number * sizeof(u32) * 2;
	pep->buffering = (readl(reg) + 1) / 2;
	pep->buffering_period = pep->buffering;
}
/*
 * Populate pdev->eps[] and the gadget's ep_list from the controller's
 * supported-endpoint bitmap. ep0 is registered as a single
 * bidirectional endpoint; all others are unidirectional.
 */
static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{
	int max_streams = HCC_MAX_PSA(pdev->hcc_params);
	struct cdnsp_ep *pep;
	int i;

	INIT_LIST_HEAD(&pdev->gadget.ep_list);

	if (max_streams < STREAM_LOG_STREAMS) {
		dev_err(pdev->dev, "Stream size %d not supported\n",
			max_streams);
		return -EINVAL;
	}

	max_streams = STREAM_LOG_STREAMS;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		bool direction = !(i & 1); /* Start from OUT endpoint. */
		u8 epnum = ((i + 1) >> 1);

		/* Skip endpoints this controller instance doesn't implement. */
		if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
			continue;

		pep = &pdev->eps[i];
		pep->pdev = pdev;
		pep->number = epnum;
		pep->direction = direction; /* 0 for OUT, 1 for IN. */

		/*
		 * Ep0 is bidirectional, so ep0in and ep0out are represented by
		 * pdev->eps[0]
		 */
		if (epnum == 0) {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, "BiDir");

			pep->idx = 0;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
			pep->endpoint.maxburst = 1;
			pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
			pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
			pep->endpoint.comp_desc = NULL;
			pep->endpoint.caps.type_control = true;
			pep->endpoint.caps.dir_in = true;
			pep->endpoint.caps.dir_out = true;

			pdev->ep0_preq.epnum = pep->number;
			pdev->ep0_preq.pep = pep;
			pdev->gadget.ep0 = &pep->endpoint;
		} else {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, (pep->direction) ? "in" : "out");

			/* Context index: ep1out=1, ep1in=2, ep2out=3, ... */
			pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);

			pep->endpoint.max_streams = max_streams;
			pep->endpoint.ops = &cdnsp_gadget_ep_ops;
			list_add_tail(&pep->endpoint.ep_list,
				      &pdev->gadget.ep_list);

			pep->endpoint.caps.type_iso = true;
			pep->endpoint.caps.type_bulk = true;
			pep->endpoint.caps.type_int = true;

			pep->endpoint.caps.dir_in = direction;
			pep->endpoint.caps.dir_out = !direction;
		}

		pep->endpoint.name = pep->name;
		pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
		pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
		cdnsp_get_ep_buffering(pdev, pep);

		dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
			"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
			"SupDir IN: %s, OUT: %s\n",
			pep->name, 1024,
			(pep->endpoint.caps.type_control) ? "yes" : "no",
			(pep->endpoint.caps.type_int) ? "yes" : "no",
			(pep->endpoint.caps.type_bulk) ? "yes" : "no",
			(pep->endpoint.caps.type_iso) ? "yes" : "no",
			(pep->endpoint.caps.dir_in) ? "yes" : "no",
			(pep->endpoint.caps.dir_out) ? "yes" : "no");

		INIT_LIST_HEAD(&pep->pending_list);
	}

	return 0;
}
  1354. static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
  1355. {
  1356. struct cdnsp_ep *pep;
  1357. int i;
  1358. for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
  1359. pep = &pdev->eps[i];
  1360. if (pep->number != 0 && pep->out_ctx)
  1361. list_del(&pep->endpoint.ep_list);
  1362. }
  1363. }
/*
 * Notify the gadget driver of a disconnect and reset gadget state.
 * Called with pdev->lock held; the lock is dropped around the
 * driver's disconnect callback.
 */
void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
{
	pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;

	if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->disconnect(&pdev->gadget);
		spin_lock(&pdev->lock);
	}

	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);

	pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
}
  1376. void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
  1377. {
  1378. if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
  1379. spin_unlock(&pdev->lock);
  1380. pdev->gadget_driver->suspend(&pdev->gadget);
  1381. spin_lock(&pdev->lock);
  1382. }
  1383. }
  1384. void cdnsp_resume_gadget(struct cdnsp_device *pdev)
  1385. {
  1386. if (pdev->gadget_driver && pdev->gadget_driver->resume) {
  1387. spin_unlock(&pdev->lock);
  1388. pdev->gadget_driver->resume(&pdev->gadget);
  1389. spin_lock(&pdev->lock);
  1390. }
  1391. }
/*
 * Handle a USB bus reset: reset the device, detect the new connection
 * speed, notify the UDC core, size ep0 for the speed, and move the
 * device to the DEFAULT state. Called with pdev->lock held.
 */
void cdnsp_irq_reset(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;

	cdnsp_reset_device(pdev);

	port_regs = pdev->active_port->regs;
	pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));

	spin_unlock(&pdev->lock);
	usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
	spin_lock(&pdev->lock);

	switch (pdev->gadget.speed) {
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		pdev->gadget.ep0->maxpacket = 512;
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		pdev->gadget.ep0->maxpacket = 64;
		break;
	default:
		/* Low speed is not supported. */
		dev_err(pdev->dev, "Unknown device speed\n");
		break;
	}

	cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
	usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
}
/* Locate the RTL revision extended capability and log its contents. */
static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;

	reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
	pdev->rev_cap = reg;

	dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
		 readl(&pdev->rev_cap->ctrl_revision),
		 readl(&pdev->rev_cap->rtl_revision),
		 readl(&pdev->rev_cap->ep_supported),
		 readl(&pdev->rev_cap->rx_buff_size),
		 readl(&pdev->rev_cap->tx_buff_size));
}
/*
 * Generic controller bring-up: map register regions, cache capability
 * registers, halt and reset the controller, configure DMA masks,
 * allocate driver memory, and apply the U1 clock-gating workaround.
 */
static int cdnsp_gen_setup(struct cdnsp_device *pdev)
{
	int ret;
	u32 reg;

	pdev->cap_regs = pdev->regs;
	pdev->op_regs = pdev->regs +
		HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
	pdev->run_regs = pdev->regs +
		(readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);

	/* Cache read-only capability registers */
	pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
	/* hc_capbase is read first only to extract the interface version. */
	pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
	pdev->hci_version = HC_VERSION(pdev->hcc_params);
	pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);

	cdnsp_get_rev_cap(pdev);

	/* Make sure the Device Controller is halted. */
	ret = cdnsp_halt(pdev);
	if (ret)
		return ret;

	/* Reset the internal controller memory state and registers. */
	ret = cdnsp_reset(pdev);
	if (ret)
		return ret;

	/*
	 * Set dma_mask and coherent_dma_mask to 64-bits,
	 * if controller supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
	}

	spin_lock_init(&pdev->lock);

	ret = cdnsp_mem_init(pdev);
	if (ret)
		return ret;

	/*
	 * Software workaround for U1: after transition
	 * to U1 the controller starts gating clock, and in some cases,
	 * it causes that controller stack.
	 */
	reg = readl(&pdev->port3x_regs->mode_2);
	reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
	writel(reg, &pdev->port3x_regs->mode_2);

	return 0;
}
/*
 * Allocate and initialize the gadget device, set up the controller,
 * register the UDC, and request the device interrupt. On failure all
 * partially-acquired resources are released in reverse order.
 *
 * NOTE(review): error paths do not pm_runtime_put() after the
 * pm_runtime_get_sync() above - verify against the core's expectations.
 */
static int __cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdnsp_device *pdev;
	u32 max_speed;
	int ret = -ENOMEM;

	cdns_drd_gadget_on(cdns);

	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pm_runtime_get_sync(cdns->dev);

	cdns->gadget_dev = pdev;
	pdev->dev = cdns->dev;
	pdev->regs = cdns->dev_regs;
	max_speed = usb_get_maximum_speed(cdns->dev);

	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		break;
	default:
		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to SSP */
		max_speed = USB_SPEED_SUPER_PLUS;
		break;
	}

	pdev->gadget.ops = &cdnsp_gadget_ops;
	pdev->gadget.name = "cdnsp-gadget";
	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	pdev->gadget.sg_supported = 1;
	pdev->gadget.max_speed = max_speed;
	pdev->gadget.lpm_capable = 1;

	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!pdev->setup_buf)
		goto free_pdev;

	/*
	 * Controller supports not aligned buffer but it should improve
	 * performance.
	 */
	pdev->gadget.quirk_ep_out_aligned_size = true;

	ret = cdnsp_gen_setup(pdev);
	if (ret) {
		dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
		goto free_setup;
	}

	ret = cdnsp_gadget_init_endpoints(pdev);
	if (ret) {
		dev_err(pdev->dev, "failed to initialize endpoints\n");
		goto halt_pdev;
	}

	ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
	if (ret) {
		dev_err(pdev->dev, "failed to register udc\n");
		goto free_endpoints;
	}

	ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
					cdnsp_irq_handler,
					cdnsp_thread_irq_handler, IRQF_SHARED,
					dev_name(pdev->dev), pdev);
	if (ret)
		goto del_gadget;

	return 0;

del_gadget:
	usb_del_gadget_udc(&pdev->gadget);
free_endpoints:
	cdnsp_gadget_free_endpoints(pdev);
halt_pdev:
	cdnsp_halt(pdev);
	cdnsp_reset(pdev);
	cdnsp_mem_cleanup(pdev);
free_setup:
	kfree(pdev->setup_buf);
free_pdev:
	kfree(pdev);

	return ret;
}
/*
 * cdnsp_gadget_exit - stop the device (gadget) role.
 * @cdns: core driver instance whose gadget_dev was set up by
 *        __cdnsp_gadget_init().
 *
 * Tears down in reverse order of __cdnsp_gadget_init(): the IRQ is
 * released first so no handler can run into freed state, then the
 * runtime-PM reference taken at init time is dropped, the UDC is
 * unregistered, endpoints and controller memory are freed, and finally
 * the DRD gadget role is switched off.
 */
static void cdnsp_gadget_exit(struct cdns *cdns)
{
	struct cdnsp_device *pdev = cdns->gadget_dev;

	/* Free the IRQ before anything it touches is torn down. */
	devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
	/* Balance the pm_runtime_get_sync() done in __cdnsp_gadget_init(). */
	pm_runtime_mark_last_busy(cdns->dev);
	pm_runtime_put_autosuspend(cdns->dev);
	usb_del_gadget_udc(&pdev->gadget);
	cdnsp_gadget_free_endpoints(pdev);
	cdnsp_mem_cleanup(pdev);
	kfree(pdev);
	/* Clear the stale pointer so the core can't reuse freed memory. */
	cdns->gadget_dev = NULL;
	cdns_drd_gadget_off(cdns);
}
  1580. static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
  1581. {
  1582. struct cdnsp_device *pdev = cdns->gadget_dev;
  1583. unsigned long flags;
  1584. if (pdev->link_state == XDEV_U3)
  1585. return 0;
  1586. spin_lock_irqsave(&pdev->lock, flags);
  1587. cdnsp_disconnect_gadget(pdev);
  1588. cdnsp_stop(pdev);
  1589. spin_unlock_irqrestore(&pdev->lock, flags);
  1590. return 0;
  1591. }
  1592. static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
  1593. {
  1594. struct cdnsp_device *pdev = cdns->gadget_dev;
  1595. enum usb_device_speed max_speed;
  1596. unsigned long flags;
  1597. int ret;
  1598. if (!pdev->gadget_driver)
  1599. return 0;
  1600. spin_lock_irqsave(&pdev->lock, flags);
  1601. max_speed = pdev->gadget_driver->max_speed;
  1602. /* Limit speed if necessary. */
  1603. max_speed = min(max_speed, pdev->gadget.max_speed);
  1604. ret = cdnsp_run(pdev, max_speed);
  1605. if (pdev->link_state == XDEV_U3)
  1606. __cdnsp_gadget_wakeup(pdev);
  1607. spin_unlock_irqrestore(&pdev->lock, flags);
  1608. return ret;
  1609. }
  1610. /**
  1611. * cdnsp_gadget_init - initialize device structure
  1612. * @cdns: cdnsp instance
  1613. *
  1614. * This function initializes the gadget.
  1615. */
  1616. int cdnsp_gadget_init(struct cdns *cdns)
  1617. {
  1618. struct cdns_role_driver *rdrv;
  1619. rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
  1620. if (!rdrv)
  1621. return -ENOMEM;
  1622. rdrv->start = __cdnsp_gadget_init;
  1623. rdrv->stop = cdnsp_gadget_exit;
  1624. rdrv->suspend = cdnsp_gadget_suspend;
  1625. rdrv->resume = cdnsp_gadget_resume;
  1626. rdrv->state = CDNS_ROLE_STATE_INACTIVE;
  1627. rdrv->name = "gadget";
  1628. cdns->roles[USB_ROLE_DEVICE] = rdrv;
  1629. return 0;
  1630. }