fifo.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486
  1. // SPDX-License-Identifier: GPL-1.0+
  2. /*
  3. * Renesas USB driver
  4. *
  5. * Copyright (C) 2011 Renesas Solutions Corp.
  6. * Copyright (C) 2019 Renesas Electronics Corporation
  7. * Kuninori Morimoto <[email protected]>
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/io.h>
  11. #include <linux/scatterlist.h>
  12. #include "common.h"
  13. #include "pipe.h"
/* shortcut to the CFIFO descriptor embedded in the driver private data */
#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))

/* a FIFO counts as busy while some pipe has it selected */
#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */
  16. /*
  17. * packet initialize
  18. */
/*
 * usbhs_pkt_init - one-time initialization of a packet's list linkage
 * @pkt: packet to initialize
 *
 * Must run before the packet is first queued with usbhs_pkt_push().
 */
void usbhs_pkt_init(struct usbhs_pkt *pkt)
{
	INIT_LIST_HEAD(&pkt->node);
}
  23. /*
  24. * packet control function
  25. */
  26. static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
  27. {
  28. struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
  29. struct device *dev = usbhs_priv_to_dev(priv);
  30. dev_err(dev, "null handler\n");
  31. return -EINVAL;
  32. }
/* stand-in handler installed when pipe->handler is missing; always fails */
static const struct usbhs_pkt_handle usbhsf_null_handler = {
	.prepare = usbhsf_null_handle,
	.try_run = usbhsf_null_handle,
};
/*
 * usbhs_pkt_push - queue a packet on a pipe and bind the pipe's current handler
 * @pipe:     pipe the packet will be transferred on
 * @pkt:      packet to queue (caller owns the memory; must be usbhs_pkt_init'd)
 * @done:     mandatory completion callback (invoked from usbhsf_pkt_handler)
 * @buf:      transfer buffer
 * @len:      transfer length in bytes
 * @zero:     nonzero to append a zero-length packet after a maxp-multiple send
 * @sequence: initial data toggle value (handlers reset it to -1 = "ignore")
 */
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
		    void (*done)(struct usbhs_priv *priv,
				 struct usbhs_pkt *pkt),
		    void *buf, int len, int zero, int sequence)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	unsigned long flags;

	if (!done) {
		dev_err(dev, "no done function\n");
		return;
	}

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	if (!pipe->handler) {
		dev_err(dev, "no handler function\n");
		pipe->handler = &usbhsf_null_handler;
	}

	list_move_tail(&pkt->node, &pipe->list);

	/*
	 * each pkt must hold own handler.
	 * because handler might be changed by its situation.
	 * dma handler -> pio handler.
	 */
	pkt->pipe	= pipe;
	pkt->buf	= buf;
	pkt->handler	= pipe->handler;
	pkt->length	= len;
	pkt->zero	= zero;
	pkt->actual	= 0;
	pkt->done	= done;
	pkt->sequence	= sequence;

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/
}
/* unlink a packet from its pipe's queue; caller holds the priv lock */
static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
	list_del_init(&pkt->node);
}
  76. struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
  77. {
  78. return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
  79. }
/* forward declarations for helpers defined later in this file */
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt);

/* map/unmap a packet buffer for DMA (thin wrappers over the ctrl helper) */
#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
/*
 * usbhs_pkt_pop - cancel and remove a packet from a pipe
 * @pipe: pipe to remove from
 * @pkt:  packet to remove, or NULL to remove the current queue head
 *
 * Aborts an in-flight DMA transfer (or disables the PIO irq), clears the
 * pipe while preserving its data sequence, and unlinks the packet.
 * Returns the removed packet, or NULL when the queue was empty.
 */
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	unsigned long flags;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	usbhs_pipe_disable(pipe);

	if (!pkt)
		pkt = __usbhsf_pkt_get(pipe);

	if (pkt) {
		struct dma_chan *chan = NULL;

		if (fifo)
			chan = usbhsf_dma_chan_get(fifo, pkt);
		if (chan) {
			/* packet was mid-DMA: terminate and unmap it */
			dmaengine_terminate_all(chan);
			usbhsf_dma_unmap(pkt);
		} else {
			/* PIO path: stop the direction-specific interrupt */
			if (usbhs_pipe_is_dir_in(pipe))
				usbhsf_rx_irq_ctrl(pipe, 0);
			else
				usbhsf_tx_irq_ctrl(pipe, 0);
		}

		usbhs_pipe_clear_without_sequence(pipe, 0, 0);
		usbhs_pipe_running(pipe, 0);

		__usbhsf_pkt_del(pkt);
	}

	if (fifo)
		usbhsf_fifo_unselect(pipe, fifo);

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	return pkt;
}
/* operation selector for usbhsf_pkt_handler() */
enum {
	USBHSF_PKT_PREPARE,	/* invoke handler->prepare */
	USBHSF_PKT_TRY_RUN,	/* invoke handler->try_run */
	USBHSF_PKT_DMA_DONE,	/* invoke handler->dma_done */
};
/*
 * usbhsf_pkt_handler - run one stage of the queue-head packet's handler
 * @pipe: pipe whose queue head is processed
 * @type: USBHSF_PKT_{PREPARE,TRY_RUN,DMA_DONE}
 *
 * Invokes the selected handler callback under the priv lock.  When the
 * callback reports completion, the packet is unlinked while locked, then
 * its done() callback runs outside the lock and the next packet starts.
 * Returns the callback's result, or -EINVAL with an empty queue.
 */
static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pkt *pkt;
	struct device *dev = usbhs_priv_to_dev(priv);
	int (*func)(struct usbhs_pkt *pkt, int *is_done);
	unsigned long flags;
	int ret = 0;
	int is_done = 0;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	pkt = __usbhsf_pkt_get(pipe);
	if (!pkt) {
		ret = -EINVAL;
		goto __usbhs_pkt_handler_end;
	}

	switch (type) {
	case USBHSF_PKT_PREPARE:
		func = pkt->handler->prepare;
		break;
	case USBHSF_PKT_TRY_RUN:
		func = pkt->handler->try_run;
		break;
	case USBHSF_PKT_DMA_DONE:
		func = pkt->handler->dma_done;
		break;
	default:
		dev_err(dev, "unknown pkt handler\n");
		goto __usbhs_pkt_handler_end;
	}

	if (likely(func))
		ret = func(pkt, &is_done);

	if (is_done)
		__usbhsf_pkt_del(pkt);

__usbhs_pkt_handler_end:
	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	if (is_done) {
		/* completion callback and queue restart must run unlocked */
		pkt->done(priv, pkt);
		usbhs_pkt_start(pipe);
	}

	return ret;
}
/* kick the queue: run "prepare" on the packet at the head of the pipe */
void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}
  174. /*
  175. * irq enable/disable function
  176. */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
/*
 * Set or clear this pipe's bit in the named irq status mask of the
 * currently active mod, then push the change to the hardware.
 *
 * NOTE: "status" is both the name of the usbhs_mod member to update
 * (so mod->status expands to e.g. mod->irq_bempsts) and, after
 * expansion, the name of the local bitmask variable.  Also note the
 * bare "return" exits the *calling* function when no mod is active,
 * which is why this must stay a macro.
 */
#define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
	({								\
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
		u16 status = (1 << usbhs_pipe_number(pipe));		\
		if (!mod)						\
			return;						\
		if (enable)						\
			mod->status |= status;				\
		else							\
			mod->status &= ~status;				\
		usbhs_irq_callback_update(priv, mod);			\
	})
  192. static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
  193. {
  194. /*
  195. * And DCP pipe can NOT use "ready interrupt" for "send"
  196. * it should use "empty" interrupt.
  197. * see
  198. * "Operation" - "Interrupt Function" - "BRDY Interrupt"
  199. *
  200. * on the other hand, normal pipe can use "ready interrupt" for "send"
  201. * even though it is single/double buffer
  202. */
  203. if (usbhs_pipe_is_dcp(pipe))
  204. usbhsf_irq_empty_ctrl(pipe, enable);
  205. else
  206. usbhsf_irq_ready_ctrl(pipe, enable);
  207. }
/* receive direction always uses the "ready" (BRDY) interrupt */
static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	usbhsf_irq_ready_ctrl(pipe, enable);
}
  212. /*
  213. * FIFO ctrl
  214. */
  215. static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
  216. struct usbhs_fifo *fifo)
  217. {
  218. struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
  219. usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
  220. }
  221. static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
  222. struct usbhs_fifo *fifo)
  223. {
  224. /* The FIFO port is accessible */
  225. if (usbhs_read(priv, fifo->ctr) & FRDY)
  226. return 0;
  227. return -EBUSY;
  228. }
/*
 * Clear the FIFO buffer (BCLR) for this pipe, taking care not to issue
 * the clear while the FIFO port is inaccessible.
 */
static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	int ret = 0;

	if (!usbhs_pipe_is_dcp(pipe)) {
		/*
		 * This driver checks the pipe condition first to avoid -EBUSY
		 * from usbhsf_fifo_barrier() if the pipe is RX direction and
		 * empty.
		 */
		if (usbhs_pipe_is_dir_in(pipe))
			ret = usbhs_pipe_is_accessible(pipe);
		if (!ret)
			ret = usbhsf_fifo_barrier(priv, fifo);
	}

	/*
	 * if non-DCP pipe, this driver should set BCLR when
	 * usbhsf_fifo_barrier() returns 0.
	 */
	if (!ret)
		usbhs_write(priv, fifo->ctr, BCLR);
}
/* number of received bytes currently held in the FIFO (DTLN field of CTR) */
static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}
/* detach the FIFO from the pipe and clear the FIFO's pipe selection register */
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_pipe_select_fifo(pipe, NULL);
	usbhs_write(priv, fifo->sel, 0);
}
/*
 * usbhsf_fifo_select - attach a FIFO port to a pipe
 * @pipe:  pipe to attach
 * @fifo:  FIFO to use
 * @write: 1 for write access (sets ISEL on the DCP), 0 for read
 *
 * Programs CURPIPE (plus ISEL for the DCP) into the FIFO select register
 * and polls until the hardware reflects the new selection.
 * Returns 0 on success, -EBUSY when pipe or FIFO is already in use,
 * -EIO when the selection never takes effect.
 */
static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      int write)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int timeout = 1024;
	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

	if (usbhs_pipe_is_busy(pipe) ||
	    usbhsf_fifo_is_busy(fifo))
		return -EBUSY;

	if (usbhs_pipe_is_dcp(pipe)) {
		base |= (1 == write) << 5;	/* ISEL */

		if (usbhs_mod_is_host(priv))
			usbhs_dcp_dir_for_host(pipe, write);
	}

	/* "base" will be used below */
	usbhs_write(priv, fifo->sel, base | MBW_32);

	/* check ISEL and CURPIPE value */
	while (timeout--) {
		if (base == (mask & usbhs_read(priv, fifo->sel))) {
			usbhs_pipe_select_fifo(pipe, fifo);
			return 0;
		}
		udelay(10);
	}

	dev_err(dev, "fifo select error\n");

	return -EIO;
}
  294. /*
  295. * DCP status stage
  296. */
/*
 * Prepare the DCP status stage in the IN (write) direction: select the
 * CFIFO for write, force DATA1, clear the buffer and set BVAL so a
 * zero-length packet is sent; completion arrives via the tx interrupt.
 */
static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_send_terminator(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}
/*
 * Prepare the DCP status stage in the OUT (read) direction: select the
 * CFIFO for read, force DATA1, clear the buffer and wait for the
 * zero-length packet via the rx interrupt.
 */
static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0) {
		dev_err(dev, "%s() fail\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_rx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}
  338. static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
  339. {
  340. struct usbhs_pipe *pipe = pkt->pipe;
  341. if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
  342. usbhsf_tx_irq_ctrl(pipe, 0);
  343. else
  344. usbhsf_rx_irq_ctrl(pipe, 0);
  345. pkt->actual = pkt->length;
  346. *is_done = 1;
  347. return 0;
  348. }
/* DCP status stage handlers: prepare switches direction, try_run completes */
const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
	.prepare = usbhs_dcp_dir_switch_to_write,
	.try_run = usbhs_dcp_dir_switch_done,
};

const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
	.prepare = usbhs_dcp_dir_switch_to_read,
	.try_run = usbhs_dcp_dir_switch_done,
};
  357. /*
  358. * DCP data stage (push)
  359. */
  360. static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
  361. {
  362. struct usbhs_pipe *pipe = pkt->pipe;
  363. usbhs_pipe_sequence_data1(pipe); /* DATA1 */
  364. /*
  365. * change handler to PIO push
  366. */
  367. pkt->handler = &usbhs_fifo_pio_push_handler;
  368. return pkt->handler->prepare(pkt, is_done);
  369. }
/* DCP data stage (push): delegates to the PIO push handler after DATA1 */
const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
	.prepare = usbhsf_dcp_data_stage_try_push,
};
  373. /*
  374. * DCP data stage (pop)
  375. */
/*
 * Data stage (pop) on the DCP: flip the DCP to the read direction,
 * clear the CFIFO, force DATA1, then delegate to the PIO pop handler.
 */
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/*
	 * prepare pop for DCP should
	 *  - change DCP direction,
	 *  - clear fifo
	 *  - DATA1
	 */
	usbhs_pipe_disable(pipe);

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_select(pipe, fifo, 0);
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * change handler to PIO pop
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->prepare(pkt, is_done);
}
/* DCP data stage (pop): delegates to the PIO pop handler after setup */
const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
	.prepare = usbhsf_dcp_data_stage_prepare_pop,
};
  404. /*
  405. * PIO push handler
  406. */
/*
 * Push up to one max-packet of data into the CFIFO with CPU copies.
 * Called once from prepare and then repeatedly from the tx interrupt
 * until the whole packet (plus optional ZLP) has gone out.
 * Returns 0 normally; on a busy FIFO it re-arms the tx irq to retry.
 */
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int total_len;
	int i, ret, len;
	int is_short;

	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0)
		return 0;

	ret = usbhs_pipe_is_accessible(pipe);
	if (ret < 0) {
		/* inaccessible pipe is not an error */
		ret = 0;
		goto usbhs_fifo_write_busy;
	}

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_write_busy;

	buf = pkt->buf + pkt->actual;
	len = pkt->length - pkt->actual;
	len = min(len, maxp);	/* at most one packet per pass */
	total_len = len;
	is_short = total_len < maxp;

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		iowrite32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* the rest operation: trailing bytes; byte-lane order varies per SoC */
	if (usbhs_get_dparam(priv, cfifo_byte_addr)) {
		for (i = 0; i < len; i++)
			iowrite8(buf[i], addr + (i & 0x03));
	} else {
		for (i = 0; i < len; i++)
			iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
	}

	/*
	 * variable update
	 */
	pkt->actual += total_len;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there are remainder data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet ? */

	/*
	 * pipe/irq handling
	 */
	if (is_short)
		usbhsf_send_terminator(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, !*is_done);
	usbhs_pipe_running(pipe, !*is_done);
	usbhs_pipe_enable(pipe);

	dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	usbhsf_fifo_unselect(pipe, fifo);

	return 0;

usbhs_fifo_write_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * pipe is busy.
	 * retry in interrupt
	 */
	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_running(pipe, 1);

	return ret;
}
  490. static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
  491. {
  492. if (usbhs_pipe_is_running(pkt->pipe))
  493. return 0;
  494. return usbhsf_pio_try_push(pkt, is_done);
  495. }
/* PIO push: prepare starts the first chunk, try_run continues on tx irq */
const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
	.prepare = usbhsf_pio_prepare_push,
	.try_run = usbhsf_pio_try_push,
};
  500. /*
  501. * PIO pop handler
  502. */
/*
 * Prepare to receive via PIO: arm the pipe and the rx "ready" interrupt.
 * The actual data copy happens later in usbhsf_pio_try_pop().
 */
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_running(pipe))
		return 0;

	/*
	 * pipe enable to prepare packet receive
	 */
	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	/* discard any stale bytes left in the CFIFO for a control read */
	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_fifo_clear(pipe, fifo);

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
	usbhs_pipe_enable(pipe);
	usbhs_pipe_running(pipe, 1);
	usbhsf_rx_irq_ctrl(pipe, 1);

	return 0;
}
/*
 * Drain received bytes from the CFIFO into the packet buffer (CPU copy).
 * Runs from the rx "ready" interrupt.  Completion is decided *before*
 * the copy so that a pipe kept in BUF state can issue the next token.
 */
static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	u32 data = 0;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int rcv_len, len;
	int i, ret;
	int total_len = 0;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		return 0;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_read_busy;

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);

	buf = pkt->buf + pkt->actual;
	len = pkt->length - pkt->actual;
	len = min(len, rcv_len);
	total_len = len;

	/*
	 * update actual length first here to decide disable pipe.
	 * if this pipe keeps BUF status and all data were popped,
	 * then, next interrupt/token will be issued again
	 */
	pkt->actual += total_len;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (total_len < maxp)) {		/* short packet */
		*is_done = 1;
		usbhsf_rx_irq_ctrl(pipe, 0);
		usbhs_pipe_running(pipe, 0);
		/*
		 * If function mode, since this controller is possible to enter
		 * Control Write status stage at this timing, this driver
		 * should not disable the pipe. If such a case happens, this
		 * controller is not able to complete the status stage.
		 */
		if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
			usbhs_pipe_disable(pipe);	/* disable pipe first */
	}

	/*
	 * Buffer clear if Zero-Length packet
	 *
	 * see
	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
	 */
	if (0 == rcv_len) {
		pkt->zero = 1;
		usbhsf_fifo_clear(pipe, fifo);
		goto usbhs_fifo_read_end;
	}

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		ioread32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* the rest operation: read one word, then scatter its bytes */
	for (i = 0; i < len; i++) {
		if (!(i & 0x03))
			data = ioread32(addr);

		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
	}

usbhs_fifo_read_end:
	dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

usbhs_fifo_read_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	return ret;
}
/* PIO pop: prepare arms the rx irq, try_run drains the FIFO on interrupt */
const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
	.prepare = usbhsf_prepare_pop,
	.try_run = usbhsf_pio_try_pop,
};
  608. /*
  609. * DCP ctrol statge handler
  610. */
/*
 * Finish a DCP control stage: notify the pipe layer that the control
 * transfer is done and mark the packet complete.
 */
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
	usbhs_dcp_control_transfer_done(pkt->pipe);

	*is_done = 1;

	return 0;
}
/* control stage end: the same completion routine serves both callbacks */
const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
	.prepare = usbhsf_ctrl_stage_end,
	.try_run = usbhsf_ctrl_stage_end,
};
  621. /*
  622. * DMA fifo functions
  623. */
  624. static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
  625. struct usbhs_pkt *pkt)
  626. {
  627. if (&usbhs_fifo_dma_push_handler == pkt->handler)
  628. return fifo->tx_chan;
  629. if (&usbhs_fifo_dma_pop_handler == pkt->handler)
  630. return fifo->rx_chan;
  631. return NULL;
  632. }
  633. static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
  634. struct usbhs_pkt *pkt)
  635. {
  636. struct usbhs_fifo *fifo;
  637. int i;
  638. usbhs_for_each_dfifo(priv, fifo, i) {
  639. if (usbhsf_dma_chan_get(fifo, pkt) &&
  640. !usbhsf_fifo_is_busy(fifo))
  641. return fifo;
  642. }
  643. return NULL;
  644. }
/* drive the DREQE bit in the FIFO select register: DREQE = on, 0 = off */
#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      u16 dreqe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}
/*
 * Map (@map = 1) or unmap (@map = 0) the packet's buffer for DMA via the
 * platform-provided dma_map_ctrl callback.
 * NOTE(review): chan is dereferenced unconditionally — callers appear to
 * guarantee a DMA handler and a channel-bearing fifo here; confirm.
 */
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);

	return info->dma_map_ctrl(chan->device->dev, pkt, map);
}
static void usbhsf_dma_complete(void *arg,
				const struct dmaengine_result *result);

/*
 * Submit the packet's (already DMA-mapped) buffer to the dmaengine and
 * start the transfer: prep descriptor, hook the completion callback,
 * submit, then enable DREQE and the pipe.  Caller holds the priv lock.
 * Silently bails out on any dmaengine failure (no fifo, prep or submit
 * error) — the packet then stays queued.
 */
static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_fifo *fifo;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	struct device *dev = usbhs_priv_to_dev(priv);
	enum dma_transfer_direction dir;
	dma_cookie_t cookie;

	fifo = usbhs_pipe_to_fifo(pipe);
	if (!fifo)
		return;

	chan = usbhsf_dma_chan_get(fifo, pkt);
	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
					pkt->trans, dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return;

	desc->callback_result	= usbhsf_dma_complete;
	desc->callback_param	= pkt;

	cookie = dmaengine_submit(desc);
	if (cookie < 0) {
		dev_err(dev, "Failed to submit dma descriptor\n");
		return;
	}

	dev_dbg(dev, " %s %d (%d/ %d)\n",
		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

	usbhs_pipe_running(pipe, 1);
	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
	dma_async_issue_pending(chan);
	usbhsf_dma_start(pipe, fifo);
	usbhs_pipe_enable(pipe);
}
/* workqueue bounce: run the DMA transfer preparation under the priv lock */
static void xfer_work(struct work_struct *work)
{
	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	unsigned long flags;

	usbhs_lock(priv, flags);
	usbhsf_dma_xfer_preparing(pkt);
	usbhs_unlock(priv, flags);
}
  710. /*
  711. * DMA push handler
  712. */
/*
 * Prepare a DMA push.  Falls back to the PIO push handler when the
 * transfer is small, the pipe is ISOC, length/alignment requirements of
 * the DMAC are not met, or no free DFIFO/channel can be selected.
 */
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len = pkt->length - pkt->actual;
	int ret;
	uintptr_t align_mask;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
		goto usbhsf_pio_prepare_push;

	/* check data length if this driver don't use USB-DMAC */
	if (!usbhs_get_dparam(priv, has_usb_dmac) && len & 0x7)
		goto usbhsf_pio_prepare_push;

	/* check buffer alignment */
	align_mask = usbhs_get_dparam(priv, has_usb_dmac) ?
					USBHS_USB_DMAC_XFER_SIZE - 1 : 0x7;
	if ((uintptr_t)(pkt->buf + pkt->actual) & align_mask)
		goto usbhsf_pio_prepare_push;

	/* return at this time if the pipe is running */
	if (usbhs_pipe_is_running(pipe))
		return 0;

	/* get enable DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_push;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_push;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_push_unselect;

	pkt->trans = len;

	usbhsf_tx_irq_ctrl(pipe, 0);
	/* FIXME: Workaound for usb dmac that driver can be used in atomic */
	if (usbhs_get_dparam(priv, has_usb_dmac)) {
		usbhsf_dma_xfer_preparing(pkt);
	} else {
		/* legacy DMAC cannot start from atomic context: defer */
		INIT_WORK(&pkt->work, xfer_work);
		schedule_work(&pkt->work);
	}

	return 0;

usbhsf_pio_prepare_push_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_push:
	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}
/*
 * DMA tx completion: account for the transferred bytes, release DREQE
 * and the FIFO, then fall back to PIO when a trailing short/zero packet
 * still has to be sent.
 */
static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);

	pkt->actual += pkt->trans;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there are remainder data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	usbhs_pipe_running(pipe, !*is_done);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	if (!*is_done) {
		/* change handler to PIO */
		pkt->handler = &usbhs_fifo_pio_push_handler;
		return pkt->handler->try_run(pkt, is_done);
	}

	return 0;
}
/* DMA push: prepare sets up/queues the transfer, dma_done finishes it */
const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
	.prepare	= usbhsf_dma_prepare_push,
	.dma_done	= usbhsf_dma_push_done,
};
  792. /*
  793. * DMA pop handler
  794. */
/*
 * prepare stage for RX on platforms without usb-dmac: just delegate to
 * the common pop preparation; the PIO/DMA decision is made later, in
 * usbhsf_dma_try_pop_with_rx_irq(), once the FIFO reports how much
 * data arrived.
 */
static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
					      int *is_done)
{
	return usbhsf_prepare_pop(pkt, is_done);
}
/*
 * Prepare an RX transfer on a usb-dmac platform. DMA is attempted
 * first; every condition that rules DMA out jumps to the labels at
 * the bottom, which fall back to the PIO pop handler.
 */
static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
						int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is ISOC */
	if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
		goto usbhsf_pio_prepare_pop;

	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	/* the buffer must be aligned to the usb-dmac transfer size */
	if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
		goto usbhsf_pio_prepare_pop;

	/* return at this time if the pipe is running */
	if (usbhs_pipe_is_running(pipe))
		return 0;

	usbhs_pipe_config_change_bfre(pipe, 1);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled irq to come here.
	 * but it is no longer needed for DMA. disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = pkt->length;

	usbhsf_dma_xfer_preparing(pkt);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;
	/* BFRE was enabled above for the DMA path; turn it back off */
	usbhs_pipe_config_change_bfre(pipe, 0);

	return pkt->handler->prepare(pkt, is_done);
}
  847. static int usbhsf_dma_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
  848. {
  849. struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
  850. if (usbhs_get_dparam(priv, has_usb_dmac))
  851. return usbhsf_dma_prepare_pop_with_usb_dmac(pkt, is_done);
  852. else
  853. return usbhsf_dma_prepare_pop_with_rx_irq(pkt, is_done);
  854. }
/*
 * try_run stage for RX on non-usb-dmac platforms. The received length
 * is only known once the FIFO reports it, so the PIO-vs-DMA decision
 * is made here rather than in prepare. Any disqualifying condition
 * falls back to the PIO pop handler via the labels at the bottom.
 */
static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len, ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* the control pipe is never driven by DMA here */
	if (usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_pop;

	/* get enable DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	/* use PIO if packet is less than pio_dma_border */
	len = usbhsf_fifo_rcv_len(priv, fifo);
	len = min(pkt->length - pkt->actual, len);
	if (len & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop_unselect;

	if (len < usbhs_get_dparam(priv, pio_dma_border))
		goto usbhsf_pio_prepare_pop_unselect;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled irq to come here.
	 * but it is no longer needed for DMA. disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = len;

	/* the transfer itself is started from workqueue context */
	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->try_run(pkt, is_done);
}
  906. static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
  907. {
  908. struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
  909. BUG_ON(usbhs_get_dparam(priv, has_usb_dmac));
  910. return usbhsf_dma_try_pop_with_rx_irq(pkt, is_done);
  911. }
/*
 * Completion stage for an RX DMA chunk on non-usb-dmac platforms:
 * release DMA resources, account the received bytes, then either
 * finish the request or re-arm reception for the remainder.
 */
static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	/* tear down DMA state before deciding what comes next */
	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	pkt->actual += pkt->trans;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (pkt->trans < maxp)) {		/* short packet */
		*is_done = 1;
		usbhs_pipe_running(pipe, 0);
	} else {
		/* re-enable */
		usbhs_pipe_running(pipe, 0);
		usbhsf_prepare_pop(pkt, is_done);
	}

	return 0;
}
/*
 * Compute how many bytes a usb-dmac RX transfer actually received.
 *
 * @pkt:  packet whose DMA completed (pkt->dma_result must be valid)
 * @chan: DMA channel used for the transfer (currently unused here)
 * @dtln: residual byte count read from the FIFO by the caller
 *
 * The base figure is derived from the dmaengine residue. When the
 * FIFO still holds residual bytes (dtln != 0), the last, partially
 * consumed usb-dmac chunk is backed out, the size is rounded down to
 * a max-packet boundary, and the residual bytes are added back.
 * NOTE(review): "&= ~(maxp - 1)" only rounds correctly if maxp is a
 * power of two — TODO confirm for all pipe configurations.
 */
static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
					   struct dma_chan *chan, int dtln)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	size_t received_size;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	received_size = pkt->length - pkt->dma_result->residue;

	if (dtln) {
		received_size -= USBHS_USB_DMAC_XFER_SIZE;
		received_size &= ~(maxp - 1);
		received_size += dtln;
	}

	return received_size;
}
/*
 * Completion stage for an RX DMA transfer on usb-dmac platforms:
 * clear the pending ready status, compute the real received size from
 * the dmaengine residue plus FIFO residual, and release all DMA/FIFO
 * resources. The transaction is always reported as done.
 */
static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
	int rcv_len;

	/*
	 * Since the driver disables rx_irq in DMA mode, the interrupt handler
	 * cannot clear the BRDYSTS. So, the function clears it here because
	 * the driver may use PIO mode next time.
	 */
	usbhs_xxxsts_clear(priv, BRDYSTS, usbhs_pipe_number(pipe));

	/* read the residual length before the FIFO is cleared */
	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
	usbhsf_fifo_clear(pipe, fifo);
	pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);

	usbhs_pipe_running(pipe, 0);
	usbhsf_dma_stop(pipe, fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	/* The driver can assume the rx transaction is always "done" */
	*is_done = 1;

	return 0;
}
  970. static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
  971. {
  972. struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
  973. if (usbhs_get_dparam(priv, has_usb_dmac))
  974. return usbhsf_dma_pop_done_with_usb_dmac(pkt, is_done);
  975. else
  976. return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
  977. }
/* packet handler used for DMA pop (RX) */
const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
	.prepare	= usbhsf_dma_prepare_pop,
	.try_run	= usbhsf_dma_try_pop,
	.dma_done	= usbhsf_dma_pop_done
};
  983. /*
  984. * DMA setting
  985. */
  986. static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
  987. {
  988. struct sh_dmae_slave *slave = param;
  989. /*
  990. * FIXME
  991. *
  992. * usbhs doesn't recognize id = 0 as valid DMA
  993. */
  994. if (0 == slave->shdma_slave.slave_id)
  995. return false;
  996. chan->private = slave;
  997. return true;
  998. }
  999. static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
  1000. {
  1001. if (fifo->tx_chan)
  1002. dma_release_channel(fifo->tx_chan);
  1003. if (fifo->rx_chan)
  1004. dma_release_channel(fifo->rx_chan);
  1005. fifo->tx_chan = NULL;
  1006. fifo->rx_chan = NULL;
  1007. }
  1008. static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
  1009. {
  1010. dma_cap_mask_t mask;
  1011. dma_cap_zero(mask);
  1012. dma_cap_set(DMA_SLAVE, mask);
  1013. fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
  1014. &fifo->tx_slave);
  1015. dma_cap_zero(mask);
  1016. dma_cap_set(DMA_SLAVE, mask);
  1017. fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
  1018. &fifo->rx_slave);
  1019. }
  1020. static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
  1021. int channel)
  1022. {
  1023. char name[16];
  1024. /*
  1025. * To avoid complex handing for DnFIFOs, the driver uses each
  1026. * DnFIFO as TX or RX direction (not bi-direction).
  1027. * So, the driver uses odd channels for TX, even channels for RX.
  1028. */
  1029. snprintf(name, sizeof(name), "ch%d", channel);
  1030. if (channel & 1) {
  1031. fifo->tx_chan = dma_request_chan(dev, name);
  1032. if (IS_ERR(fifo->tx_chan))
  1033. fifo->tx_chan = NULL;
  1034. } else {
  1035. fifo->rx_chan = dma_request_chan(dev, name);
  1036. if (IS_ERR(fifo->rx_chan))
  1037. fifo->rx_chan = NULL;
  1038. }
  1039. }
  1040. static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
  1041. int channel)
  1042. {
  1043. struct device *dev = usbhs_priv_to_dev(priv);
  1044. if (dev_of_node(dev))
  1045. usbhsf_dma_init_dt(dev, fifo, channel);
  1046. else
  1047. usbhsf_dma_init_pdev(fifo);
  1048. if (fifo->tx_chan || fifo->rx_chan)
  1049. dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
  1050. fifo->name,
  1051. fifo->tx_chan ? "[TX]" : " ",
  1052. fifo->rx_chan ? "[RX]" : " ");
  1053. }
  1054. /*
  1055. * irq functions
  1056. */
/*
 * Handle the "buffer empty" (BEMP) interrupt: run the packet
 * handler's try_run stage for every pipe whose bempsts bit is set.
 * Returns -EIO if the irq fired with no status bit pending.
 */
static int usbhsf_irq_empty(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->bempsts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->bempsts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
	}

	return 0;
}
/*
 * Handle the "buffer ready" (BRDY) interrupt: run the packet
 * handler's try_run stage for every pipe whose brdysts bit is set.
 * Returns -EIO if the irq fired with no status bit pending.
 */
static int usbhsf_irq_ready(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->brdysts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->brdysts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
	}

	return 0;
}
/*
 * dmaengine completion callback: record the transfer result on the
 * packet, then dispatch the packet handler's dma_done stage.
 */
static void usbhsf_dma_complete(void *arg,
				const struct dmaengine_result *result)
{
	struct usbhs_pkt *pkt = arg;
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	/* dma_done handlers read this via pkt->dma_result */
	pkt->dma_result = result;
	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
	if (ret < 0)
		dev_err(dev, "dma_complete run_error %d : %d\n",
			usbhs_pipe_number(pipe), ret);
}
  1119. void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
  1120. {
  1121. struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
  1122. struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
  1123. /* clear DCP FIFO of transmission */
  1124. if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
  1125. return;
  1126. usbhsf_fifo_clear(pipe, fifo);
  1127. usbhsf_fifo_unselect(pipe, fifo);
  1128. /* clear DCP FIFO of reception */
  1129. if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
  1130. return;
  1131. usbhsf_fifo_clear(pipe, fifo);
  1132. usbhsf_fifo_unselect(pipe, fifo);
  1133. }
  1134. /*
  1135. * fifo init
  1136. */
/*
 * Per-session FIFO setup: install the BEMP/BRDY irq handlers on the
 * current mod, reset the cached irq status, and detach every FIFO
 * from any pipe left over from a previous session.
 */
void usbhs_fifo_init(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
	struct usbhs_fifo *dfifo;
	int i;

	mod->irq_empty		= usbhsf_irq_empty;
	mod->irq_ready		= usbhsf_irq_ready;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;

	cfifo->pipe	= NULL;
	usbhs_for_each_dfifo(priv, dfifo, i)
		dfifo->pipe	= NULL;
}
/* undo usbhs_fifo_init(): detach the irq handlers and clear status */
void usbhs_fifo_quit(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);

	mod->irq_empty		= NULL;
	mod->irq_ready		= NULL;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;
}
/*
 * __USBHS_DFIFO_INIT - initialise one DnFIFO descriptor: name, port,
 * select/control registers, TX/RX slave ids from the device params,
 * and its DMA channels. Token pasting requires "channel" to be a
 * literal number (0..3).
 */
#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port)		\
do {									\
	fifo = usbhsf_get_dnfifo(priv, channel);			\
	fifo->name	= "D"#channel"FIFO";				\
	fifo->port	= fifo_port;					\
	fifo->sel	= D##channel##FIFOSEL;				\
	fifo->ctr	= D##channel##FIFOCTR;				\
	fifo->tx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_tx_id);	\
	fifo->rx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_rx_id);	\
	usbhsf_dma_init(priv, fifo, channel);				\
} while (0)

/* variant with a dedicated FIFO port register */
#define USBHS_DFIFO_INIT(priv, fifo, channel)				\
	__USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
/* variant without one: the port field is left as 0 */
#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel)			\
	__USBHS_DFIFO_INIT(priv, fifo, channel, 0)
/*
 * One-time FIFO setup at probe: assign name and register offsets for
 * the CFIFO, then initialise the four DnFIFOs (channels 2 and 3 are
 * set up with port = 0 via the NO_PORT variant). Always returns 0.
 */
int usbhs_fifo_probe(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;

	/* CFIFO */
	fifo = usbhsf_get_cfifo(priv);
	fifo->name	= "CFIFO";
	fifo->port	= CFIFO;
	fifo->sel	= CFIFOSEL;
	fifo->ctr	= CFIFOCTR;

	/* DFIFO */
	USBHS_DFIFO_INIT(priv, fifo, 0);
	USBHS_DFIFO_INIT(priv, fifo, 1);
	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);

	return 0;
}
  1192. void usbhs_fifo_remove(struct usbhs_priv *priv)
  1193. {
  1194. struct usbhs_fifo *fifo;
  1195. int i;
  1196. usbhs_for_each_dfifo(priv, fifo, i)
  1197. usbhsf_dma_quit(priv, fifo);
  1198. }