ipa_cmd.c

// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2022 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"

/**
 * DOC: IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and use a single GSI TRE.  Each immediate command
 * has a well-defined format, having a payload of a known length.  This
 * allows the transfer element's length field to be used to hold an
 * immediate command's opcode.  The payload for a command resides in AP
 * memory and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback, and are
 * always issued using gsi_trans_commit_wait().
 */
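
/* A minimal sketch of the usual calling pattern, using functions defined
 * below (illustrative only; a real caller checks for allocation failure
 * and sizes the transaction to match the commands it adds):
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (trans) {
 *		ipa_cmd_register_write_add(trans, offset, value, mask, false);
 *		gsi_trans_commit_wait(trans);
 *	}
 */
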
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
        pipeline_clear_hps      = 0x0,
        pipeline_clear_src_grp  = 0x1,
        pipeline_clear_full     = 0x2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
        __le64 hash_rules_addr;
        __le64 flags;
        __le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK          GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK          GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK         GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK         GENMASK_ULL(55, 40)
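
/* Together these masks lay out the 64-bit flags word as follows (bits
 * 56-63 are unused):
 *
 *	bits  0-11	hashed table size
 *	bits 12-27	hashed table offset in local memory
 *	bits 28-39	non-hashed table size
 *	bits 40-55	non-hashed table offset in local memory
 */
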
/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
        __le64 hdr_table_addr;
        __le32 flags;
        __le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK   GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK     GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK          GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK        GENMASK(10, 9)

struct ipa_cmd_register_write {
        __le16 flags;           /* Unused/reserved prior to IPA v4.0 */
        __le16 offset;
        __le32 value;
        __le32 value_mask;
        __le32 clear_options;   /* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK          GENMASK(14, 11)
/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK           GENMASK(15, 15)

/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK              GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
        u8 dest_endpoint;
        u8 reserved[7];
};

/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK     GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK          GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK        GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
        __le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
        __le16 size;
        __le16 local_addr;
        __le16 flags;
        __le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ         GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK            GENMASK(0, 0)
/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK           GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK        GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
        __le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK          GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
        struct ipa_cmd_hw_ip_fltrt_init table_init;
        struct ipa_cmd_hw_hdr_init_local hdr_init_local;
        struct ipa_cmd_register_write register_write;
        struct ipa_cmd_ip_packet_init ip_packet_init;
        struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
        struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};

static void ipa_cmd_validate_build(void)
{
        /* The sizes of filter and route tables need to fit into fields
         * in the ipa_cmd_hw_ip_fltrt_init structure.  Although hashed
         * tables might not be used, non-hashed and hashed tables have
         * the same maximum size.  IPv4 and IPv6 filter tables have the
         * same number of entries, as do IPv4 and IPv6 route tables.
         */
#define TABLE_SIZE      (TABLE_COUNT_MAX * sizeof(__le64))
#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
        BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
        BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE

        /* Hashed and non-hashed fields are assumed to be the same size */
        BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
                     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
        BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
                     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

        /* Valid endpoint numbers must fit in the IP packet init command */
        BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
                     IPA_ENDPOINT_MAX - 1);
}
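
/* In the checks above, field_max() (from <linux/bitfield.h>) yields the
 * largest value a mask's field can hold; for example, with its mask
 * GENMASK_ULL(11, 0), field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) is 0xfff.
 */
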
/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
{
        u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
        u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
        const char *table = route ? "route" : "filter";
        struct device *dev = &ipa->pdev->dev;

        /* Size must fit in the immediate command field that holds it */
        if (mem->size > size_max) {
                dev_err(dev, "%s table region size too large\n", table);
                dev_err(dev, "    (0x%04x > 0x%04x)\n",
                        mem->size, size_max);
                return false;
        }

        /* Offset must fit in the immediate command field that holds it */
        if (mem->offset > offset_max ||
            ipa->mem_offset > offset_max - mem->offset) {
                dev_err(dev, "%s table region offset too large\n", table);
                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
                        ipa->mem_offset, mem->offset, offset_max);
                return false;
        }

        /* Entire memory range must fit within IPA-local memory */
        if (mem->offset > ipa->mem_size ||
            mem->size > ipa->mem_size - mem->offset) {
                dev_err(dev, "%s table region out of range\n", table);
                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
                        mem->offset, mem->size, ipa->mem_size);
                return false;
        }

        return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
        struct device *dev = &ipa->pdev->dev;
        const struct ipa_mem *mem;
        u32 offset_max;
        u32 size_max;
        u32 offset;
        u32 size;

        /* In ipa_cmd_hdr_init_local_add() we record the offset and size of
         * the header table memory area in an immediate command.  Make sure
         * the offset and size fit in the fields that need to hold them, and
         * that the entire range is within the overall IPA memory range.
         */
        offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
        size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);

        /* The header memory area contains both the modem and AP header
         * regions.  The modem portion defines the address of the region.
         */
        mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
        offset = mem->offset;
        size = mem->size;

        /* Make sure the offset fits in the IPA command */
        if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
                dev_err(dev, "header table region offset too large\n");
                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
                        ipa->mem_offset, offset, offset_max);
                return false;
        }

        /* Add the size of the AP portion (if defined) to the combined size */
        mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
        if (mem)
                size += mem->size;

        /* Make sure the combined size fits in the IPA command */
        if (size > size_max) {
                dev_err(dev, "header table region size too large\n");
                dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);
                return false;
        }

        /* Make sure the entire combined area fits in IPA memory */
        if (size > ipa->mem_size || offset > ipa->mem_size - size) {
                dev_err(dev, "header table region out of range\n");
                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
                        offset, size, ipa->mem_size);
                return false;
        }

        return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
                                                const char *name, u32 offset)
{
        struct ipa_cmd_register_write *payload;
        struct device *dev = &ipa->pdev->dev;
        u32 offset_max;
        u32 bit_count;

        /* The maximum offset in a register_write immediate command depends
         * on the version of IPA.  A 16 bit offset is always supported,
         * but starting with IPA v4.0 some additional high-order bits are
         * allowed.
         */
        bit_count = BITS_PER_BYTE * sizeof(payload->offset);
        if (ipa->version >= IPA_VERSION_4_0)
                bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
        BUILD_BUG_ON(bit_count > 32);
        offset_max = ~0U >> (32 - bit_count);

        /* Make sure the offset can be represented by the field(s)
         * that holds it.  Also make sure the offset is not outside
         * the overall IPA memory range.
         */
        if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
                dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
                        name, ipa->mem_offset, offset, offset_max);
                return false;
        }

        return true;
}
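
/* Worked example: prior to IPA v4.0, bit_count is 16 and offset_max is
 * 0xffff.  For IPA v4.0+, the four OFFSET_HIGH bits (GENMASK(14, 11))
 * raise bit_count to 20, so offset_max becomes 0xfffff.
 */
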
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
        const struct ipa_reg *reg;
        const char *name;
        u32 offset;

        /* If hashed tables are supported, ensure the hash flush register
         * offset will fit in a register write IPA immediate command.
         */
        if (ipa_table_hash_support(ipa)) {
                reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
                offset = ipa_reg_offset(reg);
                name = "filter/route hash flush";
                if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
                        return false;
        }

        /* Each endpoint can have a status endpoint associated with it,
         * and this is recorded in an endpoint register.  If the modem
         * crashes, we reset the status endpoint for all modem endpoints
         * using a register write IPA immediate command.  Make sure the
         * worst case (highest endpoint number) offset of that endpoint
         * fits in the register write command field(s) that must hold it.
         */
        reg = ipa_reg(ipa, ENDP_STATUS);
        offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
        name = "maximal endpoint status";
        if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
                return false;

        return true;
}

bool ipa_cmd_data_valid(struct ipa *ipa)
{
        if (!ipa_cmd_header_valid(ipa))
                return false;

        if (!ipa_cmd_register_write_valid(ipa))
                return false;

        return true;
}

int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;
        struct device *dev = channel->gsi->dev;

        /* This is as good a place as any to validate build constants */
        ipa_cmd_validate_build();

        /* Command payloads are allocated one at a time, but a single
         * transaction can require up to the maximum supported by the
         * channel; treat them as if they were allocated all at once.
         */
        return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
                                       sizeof(union ipa_cmd_payload),
                                       tre_max, channel->trans_tre_max);
}

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;
        struct device *dev = channel->gsi->dev;

        gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
        struct gsi_trans_info *trans_info;
        struct ipa_endpoint *endpoint;

        endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
        trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

        return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
                            enum ipa_cmd_opcode opcode, u16 size, u32 offset,
                            dma_addr_t addr, u16 hash_size, u32 hash_offset,
                            dma_addr_t hash_addr)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        struct ipa_cmd_hw_ip_fltrt_init *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;
        u64 val;

        /* Record the non-hash table offset and size */
        offset += ipa->mem_offset;
        val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
        val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

        /* The hash table offset and address are zero if its size is 0 */
        if (hash_size) {
                /* Record the hash table offset and size */
                hash_offset += ipa->mem_offset;
                val |= u64_encode_bits(hash_offset,
                                       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
                val |= u64_encode_bits(hash_size,
                                       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
        }

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->table_init;

        /* Fill in all offsets and sizes and the non-hash table address */
        if (hash_size)
                payload->hash_rules_addr = cpu_to_le64(hash_addr);
        payload->flags = cpu_to_le64(val);
        payload->nhash_rules_addr = cpu_to_le64(addr);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          opcode);
}

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
                                dma_addr_t addr)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
        struct ipa_cmd_hw_hdr_init_local *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;
        u32 flags;

        offset += ipa->mem_offset;

        /* With this command we tell the IPA where in its local memory the
         * header tables reside.  The content of the buffer provided is
         * also written via DMA into that space.  The IPA hardware owns
         * the table, but the AP must initialize it.
         */
        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->hdr_init_local;

        payload->hdr_table_addr = cpu_to_le64(addr);
        flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
        flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
        payload->flags = cpu_to_le32(flags);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          opcode);
}

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
                                u32 mask, bool clear_full)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        struct ipa_cmd_register_write *payload;
        union ipa_cmd_payload *cmd_payload;
        u32 opcode = IPA_CMD_REGISTER_WRITE;
        dma_addr_t payload_addr;
        u32 clear_option;
        u32 options;
        u16 flags;

        /* pipeline_clear_src_grp is not used */
        clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

        /* IPA v4.0+ represents the pipeline clear options in the opcode.  It
         * also supports a larger offset by encoding additional high-order
         * bits in the payload flags field.
         */
        if (ipa->version >= IPA_VERSION_4_0) {
                u16 offset_high;
                u32 val;

                /* Opcode encodes pipeline clear options */
                /* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
                val = u16_encode_bits(clear_option,
                                      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
                opcode |= val;

                /* Extract the high 4 bits from the offset */
                offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
                offset &= (1 << 16) - 1;

                /* Encode those high 4 bits into the flags field */
                flags = u16_encode_bits(offset_high,
                                        REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
                options = 0;    /* reserved */
        } else {
                flags = 0;      /* SKIP_CLEAR flag is always 0 */
                options = u16_encode_bits(clear_option,
                                          REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
        }

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->register_write;

        payload->flags = cpu_to_le16(flags);
        payload->offset = cpu_to_le16((u16)offset);
        payload->value = cpu_to_le32(value);
        payload->value_mask = cpu_to_le32(mask);
        payload->clear_options = cpu_to_le32(options);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          opcode);
}
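
/* Worked example: on IPA v4.0+, an offset of 0x12345 yields offset_high
 * 0x1 (encoded into the flags field) and a low 16-bit offset of 0x2345
 * (stored in payload->offset).
 */
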
/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
        struct ipa_cmd_ip_packet_init *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->ip_packet_init;

        payload->dest_endpoint =
                u8_encode_bits(endpoint_id,
                               IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
                                dma_addr_t addr, bool toward_ipa)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
        struct ipa_cmd_hw_dma_mem_mem *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;
        u16 flags;

        /* size and offset must fit in 16 bit fields */
        WARN_ON(!size);
        WARN_ON(size > U16_MAX);
        WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

        offset += ipa->mem_offset;

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->dma_shared_mem;

        /* payload->clear_after_read was reserved prior to IPA v4.0.  It's
         * never needed for current code, so it's 0 regardless of version.
         */
        payload->size = cpu_to_le16(size);
        payload->local_addr = cpu_to_le16(offset);
        /* payload->flags:
         *   direction:        0 = write to IPA, 1 = read from IPA
         * Starting at v4.0 these are reserved; either way, all zero:
         *   pipeline clear:   0 = wait for pipeline clear (don't skip)
         *   clear_options:    0 = pipeline_clear_hps
         * Instead, for v4.0+ these are encoded in the opcode.  But again
         * since both values are 0 we won't bother OR'ing them in.
         */
        flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
        payload->flags = cpu_to_le16(flags);
        payload->system_addr = cpu_to_le64(addr);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          opcode);
}
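
/* Illustrative sketch of reading a block of IPA-local memory (assumes
 * the caller has a DMA-able buffer mapped at "addr"):
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (trans) {
 *		ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, false);
 *		gsi_trans_commit_wait(trans);
 *	}
 */
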
static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
        struct ipa_cmd_ip_packet_tag_status *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->ip_packet_tag_status;

        payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
        union ipa_cmd_payload *payload;
        dma_addr_t payload_addr;

        /* Just transfer a zero-filled payload structure */
        payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          opcode);
}

/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        struct ipa_endpoint *endpoint;

        /* This will complete when the transfer is received */
        reinit_completion(&ipa->completion);

        /* Issue a no-op register write command (mask 0 means no write) */
        ipa_cmd_register_write_add(trans, 0, 0, 0, true);

        /* Send a data packet through the IPA pipeline.  The packet_init
         * command says to send the next packet directly to the exception
         * endpoint without any other IPA processing.  The tag_status
         * command requests that status be generated on completion of
         * that transfer, and that the status include the tag value.
         * Finally, the transfer command sends a small packet of data
         * (instead of a command) using the command endpoint.
         */
        endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
        ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
        ipa_cmd_ip_tag_status_add(trans);
        ipa_cmd_transfer_add(trans);
}

/* Returns the number of commands required to clear the pipeline */
u32 ipa_cmd_pipeline_clear_count(void)
{
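        /* One each for the register write, packet_init, tag_status, and
         * small data transfer added by ipa_cmd_pipeline_clear_add()
         */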
        return 4;
}

void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
        wait_for_completion(&ipa->completion);
}

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
        struct ipa_endpoint *endpoint;

        if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
                return NULL;

        endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

        return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
                                       tre_count, DMA_NONE);
}
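
/* Illustrative sketch of a full pipeline clear built from the functions
 * above (the actual call sequence lives in the endpoint code, not here):
 *
 *	count = ipa_cmd_pipeline_clear_count();
 *	trans = ipa_cmd_trans_alloc(ipa, count);
 *	if (trans) {
 *		ipa_cmd_pipeline_clear_add(trans);
 *		gsi_trans_commit_wait(trans);
 *		ipa_cmd_pipeline_clear_wait(ipa);
 *	}
 */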