// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence CDNSP DRD Driver.
 *
 * Copyright (C) 2020 Cadence.
 *
 * Author: Pawel Laszczak <[email protected]>
 *
 * Code based on Linux XHCI driver.
 * Origin: Copyright (C) 2008 Intel Corp.
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "cdnsp-gadget.h"
#include "cdnsp-trace.h"

static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep);

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
						 unsigned int cycle_state,
						 unsigned int max_packet,
						 gfp_t flags)
{
	struct cdnsp_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
		if (!seg->bounce_buf)
			goto free_dma;
	}

	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}

	seg->dma = dma;
	seg->next = NULL;

	return seg;

free_dma:
	dma_pool_free(pdev->segment_pool, seg->trbs, dma);
	kfree(seg);

	return NULL;
}

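/* Return the segment's TRBs to the ring pool and free its bounce buffer. */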
static void cdnsp_segment_free(struct cdnsp_device *pdev,
			       struct cdnsp_segment *seg)
{
	if (seg->trbs)
		dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);

	kfree(seg->bounce_buf);
	kfree(seg);
}

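/* Free every segment in the circular list that starts at @first. */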
static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
					 struct cdnsp_segment *first)
{
	struct cdnsp_segment *seg;

	seg = first->next;

	while (seg != first) {
		struct cdnsp_segment *next = seg->next;

		cdnsp_segment_free(pdev, seg);
		seg = next;
	}

	cdnsp_segment_free(pdev, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void cdnsp_link_segments(struct cdnsp_device *pdev,
				struct cdnsp_segment *prev,
				struct cdnsp_segment *next,
				enum cdnsp_ring_type type)
{
	struct cdnsp_link_trb *link;
	u32 val;

	if (!prev || !next)
		return;

	prev->next = next;

	if (type != TYPE_EVENT) {
		link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
		link->segment_ptr = cpu_to_le64(next->dma);

		/*
		 * Set the last TRB in the segment to have a TRB type ID
		 * of Link TRB
		 */
		val = le32_to_cpu(link->control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		link->control = cpu_to_le32(val);
	}
}

/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void cdnsp_link_rings(struct cdnsp_device *pdev,
			     struct cdnsp_ring *ring,
			     struct cdnsp_segment *first,
			     struct cdnsp_segment *last,
			     unsigned int num_segs)
{
	struct cdnsp_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type);
	cdnsp_link_segments(pdev, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

/*
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to. We need to do this because the device controller won't
 * tell us which stream ring the TRB came from. We could store the stream ID
 * in an event data TRB, but that doesn't help us for the cancellation case,
 * since the endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses. For example,
 * say I have segments of size 1KB, that are always 1KB aligned. A segment may
 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ID:
 *
 * 0x10c90fff >> 10 = 0x43243
 * 0x10c912c0 >> 10 = 0x43244
 * 0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
 */
static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map,
					struct cdnsp_ring *ring,
					struct cdnsp_segment *seg,
					gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);

	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;

	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();

	return ret;
}

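/* Remove the radix tree entry for this segment, if one was added. */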
static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map,
					 struct cdnsp_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map,
					       struct cdnsp_ring *ring,
					       struct cdnsp_segment *first_seg,
					       struct cdnsp_segment *last_seg,
					       gfp_t mem_flags)
{
	struct cdnsp_segment *failed_seg;
	struct cdnsp_segment *seg;
	int ret;

	seg = first_seg;
	do {
		ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
						   mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		cdnsp_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

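/* Remove the radix tree entries for every segment of a stream ring. */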
static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring)
{
	struct cdnsp_segment *seg;

	seg = ring->first_seg;
	do {
		cdnsp_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring)
{
	return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, GFP_ATOMIC);
}

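/*
 * Free a ring and all of its segments. For stream rings, drop the segment
 * mappings from the radix tree first.
 */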
static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
{
	if (!ring)
		return;

	trace_cdnsp_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			cdnsp_remove_stream_mapping(ring);

		cdnsp_free_segments_for_ring(pdev, ring->first_seg);
	}

	kfree(ring);
}

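/*
 * Point the enqueue and dequeue pointers at the first TRB of the first
 * segment and reset the cycle state and free-TRB accounting.
 */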
void cdnsp_initialize_ring_info(struct cdnsp_ring *ring)
{
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to hand over ownership of the TRB, so PCS = 1. The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = 1;

	/*
	 * Each segment has a link TRB, and we leave an extra TRB for SW
	 * accounting purposes.
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}

/* Allocate segments and link them for a ring. */
static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev,
					 struct cdnsp_segment **first,
					 struct cdnsp_segment **last,
					 unsigned int num_segs,
					 unsigned int cycle_state,
					 enum cdnsp_ring_type type,
					 unsigned int max_packet,
					 gfp_t flags)
{
	struct cdnsp_segment *prev;

	/* Allocate first segment. */
	prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;

	num_segs--;
	*first = prev;

	/* Allocate all other segments. */
	while (num_segs > 0) {
		struct cdnsp_segment *next;

		next = cdnsp_segment_alloc(pdev, cycle_state,
					   max_packet, flags);
		if (!next) {
			cdnsp_free_segments_for_ring(pdev, *first);
			return -ENOMEM;
		}

		cdnsp_link_segments(pdev, prev, next, type);

		prev = next;
		num_segs--;
	}

	cdnsp_link_segments(pdev, prev, *first, type);
	*last = prev;

	return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 */
static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev,
					   unsigned int num_segs,
					   enum cdnsp_ring_type type,
					   unsigned int max_packet,
					   gfp_t flags)
{
	struct cdnsp_ring *ring;
	int ret;

	ring = kzalloc(sizeof *(ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;

	if (num_segs == 0)
		return ring;

	ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
					    &ring->last_seg, num_segs,
					    1, type, max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB. */
	if (type != TYPE_EVENT)
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);

	cdnsp_initialize_ring_info(ring);
	trace_cdnsp_ring_alloc(ring);

	return ring;

fail:
	kfree(ring);
	return NULL;
}

void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	cdnsp_ring_free(pdev, pep->ring);
	pep->ring = NULL;
	cdnsp_free_stream_info(pdev, pep);
}

/*
 * Expand an existing ring.
 * Allocate a new set of segments and link them onto the existing ring.
 */
int cdnsp_ring_expansion(struct cdnsp_device *pdev,
			 struct cdnsp_ring *ring,
			 unsigned int num_trbs,
			 gfp_t flags)
{
	unsigned int num_segs_needed;
	struct cdnsp_segment *first;
	struct cdnsp_segment *last;
	unsigned int num_segs;
	int ret;
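
	/*
	 * Each new segment contributes TRBS_PER_SEGMENT - 1 usable TRBs
	 * (one entry is consumed by the link TRB), so round up.
	 */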
	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
			  (TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size. */
	num_segs = max(ring->num_segs, num_segs_needed);

	ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs,
					    ring->cycle_state, ring->type,
					    ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map,
							  ring, first,
							  last, flags);

	if (ret) {
		cdnsp_free_segments_for_ring(pdev, first);
		return ret;
	}

	cdnsp_link_rings(pdev, ring, first, last, num_segs);
	trace_cdnsp_ring_expansion(ring);

	return 0;
}

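/* Allocate the output and input device contexts from the device context pool. */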
static int cdnsp_init_device_ctx(struct cdnsp_device *pdev)
{
	int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 2048 : 1024;

	pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
	pdev->out_ctx.size = size;
	pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
	pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
					      &pdev->out_ctx.dma);
	if (!pdev->out_ctx.bytes)
		return -ENOMEM;

	pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
	pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
	pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
	pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
					     &pdev->in_ctx.dma);
	if (!pdev->in_ctx.bytes) {
		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
			      pdev->out_ctx.dma);
		return -ENOMEM;
	}

	return 0;
}

struct cdnsp_input_control_ctx
	*cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
{
	if (ctx->type != CDNSP_CTX_TYPE_INPUT)
		return NULL;

	return (struct cdnsp_input_control_ctx *)ctx->bytes;
}

struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
{
	if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
		return (struct cdnsp_slot_ctx *)ctx->bytes;

	return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
}

struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
				      unsigned int ep_index)
{
	/* Increment ep index by offset of start of ep ctx array. */
	ep_index++;
	if (ctx->type == CDNSP_CTX_TYPE_INPUT)
		ep_index++;

	return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
}

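/* Return the endpoint's stream context array to the device context pool. */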
static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
				  struct cdnsp_ep *pep)
{
	dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
		      pep->stream_info.ctx_array_dma);
}

/* The stream context array must be a power of 2. */
static struct cdnsp_stream_ctx
	*cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	size_t size = sizeof(struct cdnsp_stream_ctx) *
		      pep->stream_info.num_stream_ctxs;

	if (size > CDNSP_CTX_SIZE)
		return NULL;

	/*
	 * The driver intentionally uses the device_pool to allocate the
	 * stream context array. A device pool entry is 2048 bytes, which
	 * gives us 128 entries.
	 */
	return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
			       &pep->stream_info.ctx_array_dma);
}

struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
{
	if (pep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&pep->stream_info.trb_address_map,
					 address >> TRB_SEGMENT_SHIFT);

	return pep->ring;
}

/*
 * Change an endpoint's internal structure so it supports stream IDs.
 * The number of requested streams includes stream 0, which cannot be used by
 * the driver.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use. This is because the number of
 * stream context array entries must be a power of two.
 */
int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
			    struct cdnsp_ep *pep,
			    unsigned int num_stream_ctxs,
			    unsigned int num_streams)
{
	struct cdnsp_stream_info *stream_info;
	struct cdnsp_ring *cur_ring;
	u32 cur_stream;
	u64 addr;
	int ret;
	int mps;

	stream_info = &pep->stream_info;
	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kcalloc(num_streams,
					    sizeof(struct cdnsp_ring *),
					    GFP_ATOMIC);
	if (!stream_info->stream_rings)
		return -ENOMEM;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep);
	if (!stream_info->stream_ctx_array)
		goto cleanup_stream_rings;

	memset(stream_info->stream_ctx_array, 0,
	       sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs);
	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
	mps = usb_endpoint_maxp(pep->endpoint.desc);

	/*
	 * Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
					    GFP_ATOMIC);
		stream_info->stream_rings[cur_stream] = cur_ring;
		if (!cur_ring)
			goto cleanup_rings;

		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;

		/* Set deq ptr, cycle bit, and stream context type. */
		addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) |
		       cur_ring->cycle_state;

		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);

		trace_cdnsp_set_stream_ring(cur_ring);

		ret = cdnsp_update_stream_mapping(cur_ring);
		if (ret)
			goto cleanup_rings;
	}

	return 0;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			cdnsp_ring_free(pdev, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}

cleanup_stream_rings:
	kfree(pep->stream_info.stream_rings);

	return -ENOMEM;
}

/* Frees all stream contexts associated with the endpoint. */
static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep)
{
	struct cdnsp_stream_info *stream_info = &pep->stream_info;
	struct cdnsp_ring *cur_ring;
	int cur_stream;

	if (!(pep->ep_state & EP_HAS_STREAMS))
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
	     cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			cdnsp_ring_free(pdev, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}

	if (stream_info->stream_ctx_array)
		cdnsp_free_stream_ctx(pdev, pep);

	kfree(stream_info->stream_rings);
	pep->ep_state &= ~EP_HAS_STREAMS;
}

/* All the cdnsp_tds in the ring's TD list should be freed at this point. */
static void cdnsp_free_priv_device(struct cdnsp_device *pdev)
{
	pdev->dcbaa->dev_context_ptrs[1] = 0;

	cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);

	if (pdev->in_ctx.bytes)
		dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
			      pdev->in_ctx.dma);

	if (pdev->out_ctx.bytes)
		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
			      pdev->out_ctx.dma);

	pdev->in_ctx.bytes = NULL;
	pdev->out_ctx.bytes = NULL;
}

static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev)
{
	int ret;

	ret = cdnsp_init_device_ctx(pdev);
	if (ret)
		return ret;

	/* Allocate endpoint 0 ring. */
	pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC);
	if (!pdev->eps[0].ring) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Point to output device context in dcbaa. */
	pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
	pdev->cmd.in_ctx = &pdev->in_ctx;

	trace_cdnsp_alloc_priv_device(pdev);
	return 0;

fail:
	dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
		      pdev->out_ctx.dma);
	dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
		      pdev->in_ctx.dma);

	return ret;
}

void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
{
	struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
	struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
	dma_addr_t dma;

	dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
	ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
}

/* Set up a controller private device for a Set Address command. */
int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	struct cdnsp_ep_ctx *ep0_ctx;
	u32 max_packets, port;

	ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context. */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

	switch (pdev->gadget.speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	default:
		/* Speed was not set, this shouldn't happen. */
		return -EINVAL;
	}

	port = DEV_PORT(pdev->active_port->port_num);
	slot_ctx->dev_port |= cpu_to_le32(port);
	slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
					   DEV_ADDR_MASK));
	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
				   pdev->eps[0].ring->cycle_state);

	trace_cdnsp_setup_addressable_priv_device(pdev);

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
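/*
 * For example (illustrative): a high-speed interrupt endpoint with
 * bInterval == 4 yields interval == 3, i.e. the endpoint is serviced
 * every 2^3 = 8 microframes (1 ms).
 */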
static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
						  struct cdnsp_ep *pep)
{
	unsigned int interval;

	interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
	if (interval != pep->endpoint.desc->bInterval - 1)
		dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
			 pep->name, 1 << interval,
			 g->speed == USB_SPEED_FULL ? "" : "micro");

	/*
	 * Full speed isoc endpoints specify interval in frames,
	 * not microframes. We are using microframes everywhere,
	 * so adjust accordingly.
	 */
	if (g->speed == USB_SPEED_FULL)
		interval += 3;	/* 1 frame = 2^3 uframes */

	/* Controller handles only up to 512ms (2^12). */
	if (interval > 12)
		interval = 12;

	return interval;
}

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
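/*
 * For example (illustrative): desc_interval == 24 microframes gives
 * fls(24) - 1 == 4, i.e. a 2^4 = 16 microframe interval after rounding
 * down, subject to the [min_exponent, max_exponent] clamp.
 */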
static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
						  struct cdnsp_ep *pep,
						  unsigned int desc_interval,
						  unsigned int min_exponent,
						  unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	return clamp_val(interval, min_exponent, max_exponent);
}

/*
 * Return the polling interval.
 *
 * The polling interval is expressed in "microframes". If the controller's
 * Interval field is set to N, it will service the endpoint every
 * 2^(Interval)*125us.
 */
static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
						struct cdnsp_ep *pep)
{
	unsigned int interval = 0;

	switch (g->speed) {
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
		    usb_endpoint_xfer_isoc(pep->endpoint.desc))
			interval = cdnsp_parse_exponent_interval(g, pep);
		break;
	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
			interval = cdnsp_parse_exponent_interval(g, pep);
		} else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
			interval = pep->endpoint.desc->bInterval << 3;
			interval = cdnsp_microframes_to_exponent(g, pep,
								 interval,
								 3, 10);
		}
		break;
	default:
		WARN_ON(1);
	}

	return interval;
}

/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
{
	if (g->speed < USB_SPEED_SUPER ||
	    !usb_endpoint_xfer_isoc(pep->endpoint.desc))
		return 0;

	return pep->endpoint.comp_desc->bmAttributes;
}

static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
					struct cdnsp_ep *pep)
{
	/* Super speed and Plus have max burst in ep companion desc */
	if (g->speed >= USB_SPEED_SUPER)
		return pep->endpoint.comp_desc->bMaxBurst;

	if (g->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
	     usb_endpoint_xfer_int(pep->endpoint.desc)))
		return usb_endpoint_maxp_mult(pep->endpoint.desc) - 1;

	return 0;
}

static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
{
	int in;

	in = usb_endpoint_dir_in(desc);

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}

	return 0;
}

/*
 * Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
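/*
 * For example (illustrative): a high-speed isoc endpoint with a 1024-byte
 * max packet size and two additional transaction opportunities per
 * microframe has a max ESIT payload of 1024 * 3 = 3072 bytes.
 */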
static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
				      struct cdnsp_ep *pep)
{
	int max_packet;
	int max_burst;

	/* Only applies for interrupt or isochronous endpoints. */
	if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
	    usb_endpoint_xfer_bulk(pep->endpoint.desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per ESIT. */
	if (g->speed >= USB_SPEED_SUPER_PLUS &&
	    USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes))
		return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per ESIT. */
	else if (g->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);

	max_packet = usb_endpoint_maxp(pep->endpoint.desc);
	max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc);

	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * max_burst;
}

int cdnsp_endpoint_init(struct cdnsp_device *pdev,
			struct cdnsp_ep *pep,
			gfp_t mem_flags)
{
	enum cdnsp_ring_type ring_type;
	struct cdnsp_ep_ctx *ep_ctx;
	unsigned int err_count = 0;
	unsigned int avg_trb_len;
	unsigned int max_packet;
	unsigned int max_burst;
	unsigned int interval;
	u32 max_esit_payload;
	unsigned int mult;
	u32 endpoint_type;
	int ret;

	ep_ctx = pep->in_ctx;

	endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(pep->endpoint.desc);

	/*
	 * Get values to fill the endpoint context, mostly from ep descriptor.
	 * The average TRB buffer length for bulk endpoints is unclear as we
	 * have no clue on scatter gather list entry size. For Isoc and Int,
	 * set it to max available.
	 */
	max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
	interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
	mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
	max_packet = usb_endpoint_maxp(pep->endpoint.desc);
	max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
	avg_trb_len = max_esit_payload;

	/* Allow 3 retries for everything but isoc, set CErr = 3. */
	if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
		err_count = 3;
	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
	    pdev->gadget.speed == USB_SPEED_HIGH)
		max_packet = 512;
	/* Controller spec indicates that ctrl ep avg TRB Length should be 8. */
	if (usb_endpoint_xfer_control(pep->endpoint.desc))
		avg_trb_len = 8;

	/* Set up the endpoint ring. */
	pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
	if (!pep->ring)
		return -ENOMEM;

	pep->skip = false;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) | EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
				  pep->ring->cycle_state);
	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
	    pdev->gadget.speed > USB_SPEED_HIGH) {
		ret = cdnsp_alloc_streams(pdev, pep);
		if (ret < 0)
			return ret;
	}

	return 0;
}

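/* Zero the endpoint's input context fields. */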
void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
	pep->in_ctx->ep_info = 0;
	pep->in_ctx->ep_info2 = 0;
	pep->in_ctx->deq = 0;
	pep->in_ctx->tx_info = 0;
}

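/*
 * Allocate the event ring segment table (ERST) and fill in one entry per
 * event ring segment.
 */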
static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
			    struct cdnsp_ring *evt_ring,
			    struct cdnsp_erst *erst)
{
	struct cdnsp_erst_entry *entry;
	struct cdnsp_segment *seg;
	unsigned int val;
	size_t size;

	size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
	erst->entries = dma_alloc_coherent(pdev->dev, size,
					   &erst->erst_dma_addr, GFP_KERNEL);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}

static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
{
	size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
	struct device *dev = pdev->dev;

	if (erst->entries)
		dma_free_coherent(dev, size, erst->entries,
				  erst->erst_dma_addr);

	erst->entries = NULL;
}

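/* Undo all of the memory allocations done in cdnsp_mem_init(). */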
void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
{
	struct device *dev = pdev->dev;

	cdnsp_free_priv_device(pdev);
	cdnsp_free_erst(pdev, &pdev->erst);

	if (pdev->event_ring)
		cdnsp_ring_free(pdev, pdev->event_ring);

	pdev->event_ring = NULL;

	if (pdev->cmd_ring)
		cdnsp_ring_free(pdev, pdev->cmd_ring);

	pdev->cmd_ring = NULL;

	dma_pool_destroy(pdev->segment_pool);
	pdev->segment_pool = NULL;
	dma_pool_destroy(pdev->device_pool);
	pdev->device_pool = NULL;

	dma_free_coherent(dev, sizeof(*pdev->dcbaa),
			  pdev->dcbaa, pdev->dcbaa->dma);

	pdev->dcbaa = NULL;

	pdev->usb2_port.exist = 0;
	pdev->usb3_port.exist = 0;
	pdev->usb2_port.port_num = 0;
	pdev->usb3_port.port_num = 0;
	pdev->active_port = NULL;
}

static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
{
	dma_addr_t deq;
	u64 temp;

	deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
				    pdev->event_ring->dequeue);

	/* Update controller event ring dequeue pointer */
	temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;

	/*
	 * Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;

	cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp,
		       &pdev->ir_set->erst_dequeue);
}

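/*
 * Record the port number and protocol revision from a Supported Protocol
 * Capability.
 */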
static void cdnsp_add_in_port(struct cdnsp_device *pdev,
			      struct cdnsp_port *port,
			      __le32 __iomem *addr)
{
	u32 temp, port_offset, port_count;

	temp = readl(addr);
	port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp);
	port->min_rev = CDNSP_EXT_PORT_MINOR(temp);

	/* Port offset and count in the third dword. */
	temp = readl(addr + 2);
	port_offset = CDNSP_EXT_PORT_OFF(temp);
	port_count = CDNSP_EXT_PORT_COUNT(temp);

	trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev);

	port->port_num = port_offset;
	port->exist = 1;
}

/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
 * specify what speeds each port is supposed to be.
 */
static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev)
{
	void __iomem *base;
	u32 offset;
	int i;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, 0,
					 EXT_CAP_CFG_DEV_20PORT_CAP_ID);
	pdev->port20_regs = base + offset;

	offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
	pdev->port3x_regs = base + offset;

	offset = 0;
	base = &pdev->cap_regs->hc_capbase;

	/* Driver expects at most 2 extended protocol capabilities. */
	for (i = 0; i < 2; i++) {
		u32 temp;

		offset = cdnsp_find_next_ext_cap(base, offset,
						 EXT_CAPS_PROTOCOL);
		temp = readl(base + offset);
		if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 &&
		    !pdev->usb3_port.port_num)
			cdnsp_add_in_port(pdev, &pdev->usb3_port,
					  base + offset);

		if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 &&
		    !pdev->usb2_port.port_num)
			cdnsp_add_in_port(pdev, &pdev->usb2_port,
					  base + offset);
	}

	if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) {
		dev_err(pdev->dev, "Error: Only one port detected\n");
		return -ENODEV;
	}

	trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports.");

	pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *)
			       (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
				(pdev->usb2_port.port_num - 1));

	pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *)
			       (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
				(pdev->usb3_port.port_num - 1));

	return 0;
}

/*
 * Initialize memory for CDNSP (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts, set up a command ring segment, create event
 * ring (one for now).
 */
int cdnsp_mem_init(struct cdnsp_device *pdev)
{
	struct device *dev = pdev->dev;
	int ret = -ENOMEM;
	unsigned int val;
	dma_addr_t dma;
	u32 page_size;
	u64 val_64;

	/*
	 * Use 4K pages, since that's common and the minimum the
	 * controller supports
	 */
	page_size = 1 << 12;

	val = readl(&pdev->op_regs->config_reg);
	val |= ((val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS) | CONFIG_U3E;
	writel(val, &pdev->op_regs->config_reg);

	/*
	 * Doorbell array must be physically contiguous
	 * and 64-byte (cache line) aligned.
	 */
	pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
					 &dma, GFP_KERNEL);
	if (!pdev->dcbaa)
		return -ENOMEM;

	pdev->dcbaa->dma = dma;

	cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool. The ring must be a contiguous
	 * structure comprised of TRBs. The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so the driver picks the greater
	 * alignment need.
	 */
	pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
					     TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
					     page_size);
	if (!pdev->segment_pool)
		goto release_dcbaa;

	pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
					    CDNSP_CTX_SIZE, 64, page_size);
	if (!pdev->device_pool)
		goto destroy_segment_pool;

	/* Set up the command ring to have one segment for now. */
	pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
	if (!pdev->cmd_ring)
		goto destroy_device_pool;

	/* Set the address in the Command Ring Control register. */
	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
		 pdev->cmd_ring->cycle_state;
	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);

	val = readl(&pdev->cap_regs->db_off);
	val &= DBOFF_MASK;
	pdev->dba = (void __iomem *)pdev->cap_regs + val;

	/* Set ir_set to interrupt register set 0 */
	pdev->ir_set = &pdev->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).
	 */
	pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
					    0, GFP_KERNEL);
	if (!pdev->event_ring)
		goto free_cmd_ring;

	ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
	if (ret)
		goto free_event_ring;

	/* Set ERST count with the number of entries in the segment table. */
	val = readl(&pdev->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	writel(val, &pdev->ir_set->erst_size);

	/* Set the segment table base address. */
	val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
	cdnsp_write_64(val_64, &pdev->ir_set->erst_base);

	/* Set the event ring dequeue address. */
	cdnsp_set_event_deq(pdev);

	ret = cdnsp_setup_port_arrays(pdev);
	if (ret)
		goto free_erst;

	ret = cdnsp_alloc_priv_device(pdev);
	if (ret) {
		dev_err(pdev->dev,
			"Could not allocate cdnsp_device data structures\n");
		goto free_erst;
	}

	return 0;

free_erst:
	cdnsp_free_erst(pdev, &pdev->erst);
free_event_ring:
	cdnsp_ring_free(pdev, pdev->event_ring);
free_cmd_ring:
	cdnsp_ring_free(pdev, pdev->cmd_ring);
destroy_device_pool:
	dma_pool_destroy(pdev->device_pool);
destroy_segment_pool:
	dma_pool_destroy(pdev->segment_pool);
release_dcbaa:
	dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
			  pdev->dcbaa->dma);

	cdnsp_reset(pdev);

	return ret;
}