davinci_cpdma.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
	int			num_rx_desc; /* RX descriptors number */
	int			num_tx_desc; /* TX descriptors number */
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

struct submit_info {
	struct cpdma_chan *chan;
	int directed;
	void *token;
	void *data_virt;
	dma_addr_t data_dma;
	int len;
};

static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		readl((chan)->fld)
#define desc_read(desc, fld)		readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

#define CPDMA_DMA_EXT_MAP	BIT(16)

static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;

	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
	struct cpdma_params *cpdma_params = &ctlr->params;
	struct cpdma_desc_pool *pool;
	int ret = -ENOMEM;

	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;
	ctlr->pool = pool;

	pool->mem_size	= cpdma_params->desc_mem_size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc),
				cpdma_params->desc_align);
	pool->num_desc	= pool->mem_size / pool->desc_size;

	if (cpdma_params->descs_pool_size) {
		/* recalculate the pool memory size from the number of
		 * descriptors requested by the user; if it exceeds the CPPI
		 * internal RAM size (desc_mem_size), fall back to DDR
		 */
		pool->num_desc = cpdma_params->descs_pool_size;
		pool->mem_size = pool->desc_size * pool->num_desc;
		if (pool->mem_size > cpdma_params->desc_mem_size)
			cpdma_params->desc_mem_phys = 0;
	}

	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
					      -1, "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		ret = PTR_ERR(pool->gen_pool);
		dev_err(ctlr->dev, "pool create failed %d\n", ret);
		goto gen_pool_create_fail;
	}

	if (cpdma_params->desc_mem_phys) {
		pool->phys = cpdma_params->desc_mem_phys;
		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
					   pool->mem_size);
		pool->hw_addr = cpdma_params->desc_hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(ctlr->dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return 0;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
	ctlr->pool = NULL;
	return ret;
}
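
/*
 * Illustrative sketch (not part of the original driver; field values are
 * hypothetical): a glue driver with dedicated CPPI RAM would point the pool
 * at it through struct cpdma_params, e.g.
 *
 *	struct cpdma_params params = {
 *		.dev		= &pdev->dev,
 *		.desc_align	= 16,
 *		.desc_mem_size	= SZ_8K,
 *		.desc_mem_phys	= cppi_ram_phys,
 *		.desc_hw_addr	= cppi_ram_hw,
 *	};
 *
 * whereas leaving .desc_mem_phys at 0, or requesting more descriptors than
 * fit in desc_mem_size via .descs_pool_size, makes cpdma_desc_pool_create()
 * fall back to dma_alloc_coherent(), i.e. descriptors in DDR.
 */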
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info = &controls[control];
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		return -EPERM;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
	return ret;
}

/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success, -EINVAL if an upper channel is not rate limited.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 0;
	int i;

	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			rlim = 1;
			new_rmask |= chan->mask;
			continue;
		}

		if (rlim)
			goto err;
	}

	*rmask = new_rmask;
	*prio_mode = rlim;
	return 0;

err:
	ch->rate = old_rate;
	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
		chan->chan_num);
	return -EINVAL;
}

static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 divident, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		divident = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);

		divident = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		divident = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
	return 0;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	if (cpdma_desc_pool_create(ctlr))
		return NULL;
	/* split pool equally between RX/TX by default */
	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		writel(0, ctlr->params.txhdp + 4 * i);
		writel(0, ctlr->params.rxhdp + 4 * i);
		writel(0, ctlr->params.txcp + 4 * i);
		writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* turn off fixed prio mode only if all tx channels
			 * are rate limited
			 */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
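
/*
 * Bring-up sketch (not part of the original driver): the state checks above
 * imply the following calling order for a client driver; my_tx_handler and
 * my_rx_handler are hypothetical completion callbacks and error handling is
 * elided.
 *
 *	ctlr = cpdma_ctlr_create(&params);
 *	tx = cpdma_chan_create(ctlr, 0, my_tx_handler, 0);
 *	rx = cpdma_chan_create(ctlr, 0, my_rx_handler, 1);
 *	cpdma_ctlr_start(ctlr);
 *	...
 *	cpdma_ctlr_stop(ctlr);
 *	cpdma_ctlr_destroy(ctlr);
 */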
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr);
	return ret;
}

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}

static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* give any remaining descriptors to the largest channel */
	if (most_chan)
		most_chan->desc_num += desc_cnt;
}

/*
 * cpdma_chan_split_pool - Splits the ctlr pool between all channels.
 * Has to be called under ctlr lock
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i;

	if (!ctlr->chan_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = ctlr->num_tx_desc;
	rx_desc_num = ctlr->num_rx_desc;
	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}
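
/*
 * Worked example (hypothetical numbers, not from the original source): with
 * num_tx_desc = 128 and three tx channels weighted 50%, 0% and 0%,
 * cpdma_chan_split_pool() reserves 128 - 64 = 64 descriptors for the two
 * unweighted channels, i.e. tx_per_ch_desc = 32 each, and
 * cpdma_chan_set_descs() then assigns 64/32/32, with any rounding remainder
 * added to the largest channel.
 */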
/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights, i.e. 100% for Rx and
 * 100% for Tx. The weight is used to split cpdma resources, including
 * the number of descriptors, in the correct proportion between the
 * channels. The channel rate alone is not enough to derive the weight
 * of a channel, as the maximum rate of the interface would also be
 * needed. If weight = 0, the channel uses whatever descriptors are
 * left over by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
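
/*
 * Usage sketch (hypothetical, not from the original source): a client that
 * wants one tx queue to get most of the descriptor budget could do
 *
 *	cpdma_chan_set_weight(tx_hi, 70);
 *	cpdma_chan_set_weight(tx_lo, 0);
 *
 * so tx_hi gets 70% of the tx descriptors and tx_lo shares whatever is left;
 * tx_hi/tx_lo stand for channels returned by cpdma_chan_create().
 */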
/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int divident, divisor;

	divident = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(divident, divisor);
}

/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The bandwidth-limited channels have to be in order beginning from lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; 0 turns the shaper off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	unsigned long flags, ch_flags;
	struct cpdma_ctlr *ctlr;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	ctlr = ch->ctlr;
	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* enable shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
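
/*
 * Usage sketch (hypothetical values, not from the original source): since
 * cpdma_chan_get_min_rate() reports the lowest rate the shaper can express,
 * a client would typically clamp its request before applying it:
 *
 *	u32 min = cpdma_chan_get_min_rate(ctlr);
 *
 *	if (rate_kbps && rate_kbps < min)
 *		rate_kbps = min;
 *	ret = cpdma_chan_set_rate(tx_chan, rate_kbps);
 *
 * Passing 0 disables the shaper for that channel again.
 */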
u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return ERR_PTR(-EINVAL);

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->rate	= 0;
	chan->weight	= 0;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}

	chan->mask = BIT(chan_linear(chan));
	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

static int cpdma_chan_submit_si(struct submit_info *si)
{
	struct cpdma_chan *chan = si->chan;
	struct cpdma_ctlr *ctlr = chan->ctlr;
	int len = si->len;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	u32 mode;
	int ret;

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		return -ENOMEM;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		return -ENOMEM;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, si->directed);

	if (si->data_dma) {
		buffer = si->data_dma;
		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
	} else {
		buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
		ret = dma_mapping_error(ctlr->dev, buffer);
		if (ret) {
			cpdma_desc_free(ctlr->pool, desc, 1);
			return -EINVAL;
		}
	}

	/* Relaxed IO accessors can be used here as there is read barrier
	 * at the end of write sequence.
	 */
	writel_relaxed(0, &desc->hw_next);
	writel_relaxed(buffer, &desc->hw_buffer);
	writel_relaxed(len, &desc->hw_len);
	writel_relaxed(mode | len, &desc->hw_mode);
	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
	writel_relaxed(buffer, &desc->sw_buffer);
	writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
		       &desc->sw_len);
	desc_read(desc, sw_len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;
	return 0;
}
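
/* The four exported submit variants below all feed cpdma_chan_submit_si().
 * The _mapped variants take a buffer that is already DMA mapped (data_dma
 * set, only synced for the device here); the plain variants map data_virt
 * themselves.  The _idle variants refuse submission only during teardown,
 * so buffers can be queued before the channel is started, while the
 * non-idle variants require the channel to be active.
 */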
int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
			   int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
				  dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
			     dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	bool free_tx_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	uintptr_t token;

	token = desc_read(desc, sw_token);
	origlen = desc_read(desc, sw_len);
	buff_dma = desc_read(desc, sw_buffer);

	if (origlen & CPDMA_DMA_EXT_MAP) {
		origlen &= ~CPDMA_DMA_EXT_MAP;
		dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
					chan->dir);
	} else {
		dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	}

	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)((void *)token, outlen, status);
}
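
/* __cpdma_chan_process - reclaim the descriptor at the head of the queue:
 * bail out if the hardware still owns it, otherwise acknowledge it via the
 * completion pointer, restart the queue if EOQ was raised with more
 * descriptors pending, and finally unmap/free it and run the client handler
 * with the channel lock dropped.
 */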
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = desc_read(desc, hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);

	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_rx_desc;
}

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_tx_desc;
}

int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
	unsigned long flags;
	int temp, ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	temp = ctlr->num_rx_desc;
	ctlr->num_rx_desc = num_rx_desc;
	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	ret = cpdma_chan_split_pool(ctlr);
	if (ret) {
		ctlr->num_rx_desc = temp;
		ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}