
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2022 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"
/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA operations in a single structure.
 * (An "operation" in this sense is either a data transfer or an IPA immediate
 * command.) Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe operations
 * to be performed. When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform an operation (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per operation). If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds. This way
 * exhaustion of the available TREs in a channel ring is detected as early
 * as possible. Any other resources that might be needed to complete a
 * transaction are also allocated when the transaction is allocated.
 *
 * Operations performed as part of a transaction are represented in an array
 * of Linux scatterlist structures, allocated with the transaction. These
 * scatterlist structures are initialized by "adding" operations to the
 * transaction. If a buffer in an operation must be mapped for DMA, this is
 * done at the time it is added to the transaction. It is possible for a
 * mapping error to occur when an operation is added. In this case the
 * transaction should simply be freed; this correctly releases resources
 * associated with the transaction.
 *
 * Once all operations have been successfully added to a transaction, the
 * transaction is committed. Committing transfers ownership of the entire
 * transaction to the GSI transaction core. The GSI transaction code
 * formats the content of the scatterlist array into the channel ring
 * buffer and informs the hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it. Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */
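
/* What follows is a minimal illustrative sketch (not part of the original
 * driver) of the allocate/add/commit flow described above, assuming the
 * caller already has a gsi pointer, a channel_id, and a receive page in
 * hand. The name gsi_trans_example_rx() is hypothetical.
 */
static int __maybe_unused gsi_trans_example_rx(struct gsi *gsi, u32 channel_id,
					       struct page *page, u32 size)
{
	struct gsi_trans *trans;
	int ret;

	/* Reserve one TRE; a NULL return means the ring is exhausted */
	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
	if (!trans)
		return -EBUSY;

	/* Adding the page maps it for DMA; on error, freeing the
	 * transaction releases everything it holds
	 */
	ret = gsi_trans_page_add(trans, page, size, 0);
	if (ret) {
		gsi_trans_free(trans);
		return ret;
	}

	/* Committing hands ownership to the GSI transaction core */
	gsi_trans_commit(trans, true);

	return 0;
}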
/* Hardware values representing a transfer element type */
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
	 * single allocation), we can always satisfy requests without
	 * ever worrying about straddling the end of the pool array.
	 * If there aren't enough entries starting at the free index,
	 * we just allocate free entries from the beginning of the pool.
	 */
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}
void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}

/* Home-grown DMA pool. This way we can preallocate the pool, and guarantee
 * allocations will succeed. The immediate commands in a transaction can
 * require up to max_alloc elements from the pool. But we only allow
 * allocation of a single element from a DMA pool at a time.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator will give us a power-of-2 number of pages
	 * sufficient to satisfy our request. Round up our requested
	 * size to avoid any unused space in the allocation. This way
	 * gsi_trans_pool_exit_dma() can assume the total allocated
	 * size is exactly (count * size).
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}

/* Return the byte offset of the next free entry in the pool */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	WARN_ON(!count);
	WARN_ON(count > pool->max_alloc);

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}
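
/* Worked example of the slack described in gsi_trans_pool_init() (numbers
 * here are illustrative): with count = 8 and max_alloc = 3 the pool holds
 * at least 8 + 3 - 1 = 10 entries. With 10 entries and free = 7, a request
 * for 3 entries fits in entries 7..9 with no wrap; at free = 8 or 9 the
 * same request would straddle the end, so the allocator restarts at 0.
 */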
/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}
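
/* A minimal illustrative sketch (not part of the original driver) of
 * drawing one coherent element from a pool set up with
 * gsi_trans_pool_init_dma(). The caller gets both the CPU address and
 * the matching device (DMA) address of the element. The function name
 * is hypothetical.
 */
static void __maybe_unused gsi_trans_pool_dma_example(struct gsi_trans_pool *pool)
{
	dma_addr_t addr;
	void *virt;

	/* Only one element may be allocated from a DMA pool at a time */
	virt = gsi_trans_pool_alloc_dma(pool, &addr);

	/* virt is zeroed and ready for the CPU to fill; addr is what
	 * would be written into a TRE for the hardware to consume
	 */
	(void)virt;
}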
/* Map a TRE ring entry index to the transaction it is associated with */
static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];

	/* The completion event will indicate the last TRE used */
	index += trans->used_count - 1;

	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u16 trans_id = trans_info->completed_id;

	if (trans_id == trans_info->pending_id) {
		gsi_channel_update(channel);
		if (trans_id == trans_info->pending_id)
			return NULL;
	}

	return &trans_info->trans[trans_id % channel->tre_count];
}

/* Move a transaction from allocated to committed state */
static void gsi_trans_move_committed(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	/* This allocated transaction is now committed */
	trans_info->allocated_id++;
}

/* Move committed transactions to pending state */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u16 trans_index = trans - &trans_info->trans[0];
	u16 delta;

	/* These committed transactions are now pending */
	delta = trans_index - trans_info->committed_id + 1;
	trans_info->committed_id += delta % channel->tre_count;
}

/* Move pending transactions to completed state */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u16 trans_index = trans - trans_info->trans;
	u16 delta;

	/* These pending transactions are now completed */
	delta = trans_index - trans_info->pending_id + 1;
	delta %= channel->tre_count;
	trans_info->pending_id += delta;
}

/* Move a transaction from completed to polled state */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	/* This completed transaction is now polled */
	trans_info->completed_id++;
}

/* Reserve some number of TREs on a channel. Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}

/* Return true if no transactions are allocated, false otherwise */
bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
{
	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
	struct gsi_trans_info *trans_info;

	trans_info = &gsi->channel[channel_id].trans_info;

	return atomic_read(&trans_info->tre_avail) == tre_max;
}

/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;
	u16 trans_index;

	if (WARN_ON(tre_count > channel->trans_tre_max))
		return NULL;

	trans_info = &channel->trans_info;

	/* If we can't reserve the TREs for the transaction, we're done */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	trans_index = trans_info->free_id % channel->tre_count;
	trans = &trans_info->trans[trans_index];
	memset(trans, 0, sizeof(*trans));

	/* Initialize non-zero fields in the transaction */
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->rsvd_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;
	refcount_set(&trans->refcount, 1);

	/* This free transaction is now allocated */
	trans_info->free_id++;

	return trans;
}

/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
	struct gsi_trans_info *trans_info;

	if (!refcount_dec_and_test(&trans->refcount))
		return;

	/* Unused transactions are allocated but never committed, pending,
	 * completed, or polled.
	 */
	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
	if (!trans->used_count) {
		trans_info->allocated_id++;
		trans_info->committed_id++;
		trans_info->pending_id++;
		trans_info->completed_id++;
	} else {
		ipa_gsi_trans_release(trans);
	}

	/* This transaction is now free */
	trans_info->polled_id++;

	/* Releasing the reserved TREs implicitly frees the sgl[] and
	 * (if present) info[] arrays, plus the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->rsvd_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
	u32 which = trans->used_count++;
	struct scatterlist *sg;

	WARN_ON(which >= trans->rsvd_count);

	/* Commands are quite different from data transfer requests.
	 * Their payloads come from a pool whose memory is allocated
	 * using dma_alloc_coherent(). We therefore do *not* map them
	 * for DMA (unlike what we do for pages and skbs).
	 *
	 * When a transaction completes, the SGL is normally unmapped.
	 * A command transaction has direction DMA_NONE, which tells
	 * gsi_trans_complete() to skip the unmapping step.
	 *
	 * The only things we use directly in a command scatter/gather
	 * entry are the DMA address and length. We still need the SG
	 * table flags to be maintained though, so assign a NULL page
	 * pointer for that purpose.
	 */
	sg = &trans->sgl[which];
	sg_assign_page(sg, NULL);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = size;

	trans->cmd_opcode[which] = opcode;
}

/* Add a page transfer to a transaction. It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	if (WARN_ON(trans->rsvd_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used_count))
		return -EINVAL;

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used_count++;	/* Transaction now owns the (DMA mapped) page */

	return 0;
}

/* Add an SKB transfer to a transaction. No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used_count;
	int ret;

	if (WARN_ON(trans->rsvd_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used_count))
		return -EINVAL;

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used_count = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
	if (!ret)
		return -ENOMEM;

	/* Transaction now owns the (DMA mapped) skb */
	trans->used_count += used_count;

	return 0;
}

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}
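
/* Worked example of the encoding above: for the last TRE of a transfer
 * on a TX channel (last_tre = true, bei = true, opcode = IPA_CMD_NONE),
 * the type field gets GSI_RE_XFER (0x2 << 16 = 0x020000), plus IEOT
 * (bit 9, 0x200) and BEI (bit 10, 0x400), giving 0x020600 before the
 * little-endian conversion.
 */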
static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* ARM64 can write 16 bytes as a unit with a single instruction.
	 * Doing the assignment this way is an attempt to make that happen.
	 */
	*dest_tre = tre;
}
/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes. Moves the transaction to
 * committed state (and on to pending state if the doorbell is rung).
 * Finally, updates the channel ring pointer and optionally rings the
 * doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *tre_ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u8 *cmd_opcode;
	u32 avail;
	u32 i;

	WARN_ON(!trans->used_count);

	/* Consume the entries. If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If there is no info array we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
	avail = tre_ring->count - tre_ring->index % tre_ring->count;
	dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
	for_each_sg(trans->sgl, sg, trans->used_count, i) {
		bool last_tre = i == trans->used_count - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(tre_ring, 0);
		if (cmd_opcode)
			opcode = *cmd_opcode++;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}

	/* Associate the TRE with the transaction */
	gsi_trans_map(trans, tre_ring->index);

	tre_ring->index += trans->used_count;

	trans->len = byte_count;
	if (channel->toward_ipa)
		gsi_trans_tx_committed(trans);

	gsi_trans_move_committed(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_trans_tx_queued(trans);
		gsi_trans_move_pending(trans);
		gsi_channel_doorbell(channel);
	}
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used_count)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used_count)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u16 trans_id = trans_info->pending_id;

	/* channel->gsi->mutex is held by caller */

	/* If there are no pending transactions, we're done */
	if (trans_id == trans_info->committed_id)
		return;

	/* Mark all pending transactions cancelled */
	do {
		struct gsi_trans *trans;

		trans = &trans_info->trans[trans_id % channel->tre_count];
		trans->cancelled = true;
	} while (++trans_id != trans_info->committed_id);

	/* All pending transactions are now completed */
	trans_info->pending_id = trans_info->committed_id;

	/* Schedule NAPI polling to complete the cancelled transactions */
	napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *tre_ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */
	dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	tre_ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 tre_count = channel->tre_count;
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	trans_info = &channel->trans_info;

	/* The tre_avail field is what ultimately limits the number of
	 * outstanding transactions and their resources. A transaction
	 * allocation succeeds only if the TREs available are sufficient
	 * for what the transaction might need.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
	atomic_set(&trans_info->tre_avail, tre_max);

	/* We can't use more TREs than the number available in the ring.
	 * This limits the number of transactions that can be outstanding.
	 * Worst case is one TRE per transaction (but we actually limit
	 * it to something a little less than that). By allocating a
	 * power-of-two number of transactions we can use an index
	 * modulo that number to determine the next one that's free.
	 * Transactions are allocated one at a time.
	 */
	trans_info->trans = kcalloc(tre_count, sizeof(*trans_info->trans),
				    GFP_KERNEL);
	if (!trans_info->trans)
		return -ENOMEM;
	trans_info->free_id = 0;	/* all modulo channel->tre_count */
	trans_info->allocated_id = 0;
	trans_info->committed_id = 0;
	trans_info->pending_id = 0;
	trans_info->completed_id = 0;
	trans_info->polled_id = 0;

	/* A completion event contains a pointer to the TRE that caused
	 * the event (which will be the last one used by the transaction).
	 * Each entry in this map records the transaction associated
	 * with a corresponding completed TRE.
	 */
	trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map) {
		ret = -ENOMEM;
		goto err_trans_free;
	}

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction. Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed. So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->trans_tre_max);
	if (ret)
		goto err_map_free;

	return 0;

err_map_free:
	kfree(trans_info->map);
err_trans_free:
	kfree(trans_info->trans);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}

/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	kfree(trans_info->trans);
	kfree(trans_info->map);
}