ipa_endpoint.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2019-2022 Linaro Ltd.
  4. */
  5. #include <linux/types.h>
  6. #include <linux/device.h>
  7. #include <linux/slab.h>
  8. #include <linux/bitfield.h>
  9. #include <linux/if_rmnet.h>
  10. #include <linux/dma-direction.h>
  11. #include "gsi.h"
  12. #include "gsi_trans.h"
  13. #include "ipa.h"
  14. #include "ipa_data.h"
  15. #include "ipa_endpoint.h"
  16. #include "ipa_cmd.h"
  17. #include "ipa_mem.h"
  18. #include "ipa_modem.h"
  19. #include "ipa_table.h"
  20. #include "ipa_gsi.h"
  21. #include "ipa_power.h"
  22. /* Hardware is told about receive buffers once a "batch" has been queued */
  23. #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
  24. /* The amount of RX buffer space consumed by standard skb overhead */
  25. #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
  26. /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
  27. #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
  28. #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
  29. /** enum ipa_status_opcode - status element opcode hardware values */
  30. enum ipa_status_opcode {
  31. IPA_STATUS_OPCODE_PACKET = 0x01,
  32. IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
  33. IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
  34. IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
  35. };
  36. /** enum ipa_status_exception - status element exception type */
  37. enum ipa_status_exception {
  38. /* 0 means no exception */
  39. IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
  40. };
  41. /* Status element provided by hardware */
  42. struct ipa_status {
  43. u8 opcode; /* enum ipa_status_opcode */
  44. u8 exception; /* enum ipa_status_exception */
  45. __le16 mask;
  46. __le16 pkt_len;
  47. u8 endp_src_idx;
  48. u8 endp_dst_idx;
  49. __le32 metadata;
  50. __le32 flags1;
  51. __le64 flags2;
  52. __le32 flags3;
  53. __le32 flags4;
  54. };
  55. /* Field masks for struct ipa_status structure fields */
  56. #define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
  57. #define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
  58. #define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
  59. #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
  60. #define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
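/* Editor's note -- a minimal decode sketch using the masks above. These two
 * accessors come from <linux/bitfield.h> and are the same ones used later in
 * this file:
 *
 *	endpoint_id = u8_get_bits(status->endp_dst_idx, IPA_STATUS_DST_IDX_FMASK);
 *	rule_id = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
 */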
  61. /* Compute the aggregation size value to use for a given buffer size */
  62. static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
  63. {
  64. /* A hard aggregation limit will not be crossed; aggregation closes
  65. * if saving incoming data would cross the hard byte limit boundary.
  66. *
  67. * With a soft limit, aggregation closes *after* the size boundary
  68. * has been crossed. In that case the limit must leave enough space
  69. * after that limit to receive a full MTU of data plus overhead.
  70. */
  71. if (!aggr_hard_limit)
  72. rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
  73. /* The byte limit is encoded as a number of kilobytes */
  74. return rx_buffer_size / SZ_1K;
  75. }
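/* Editor's example, assuming an 8192-byte receive buffer purely for
 * illustration: with a hard limit the encoded value is 8192 / SZ_1K = 8 KB,
 * and aggregation closes before that boundary would be crossed. With a soft
 * limit, IPA_MTU + IPA_RX_BUFFER_OVERHEAD is subtracted first, so the encoded
 * limit is smaller and the remaining space can absorb the one full-MTU packet
 * that may still arrive after the boundary is crossed.
 */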
  76. static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
  77. const struct ipa_gsi_endpoint_data *all_data,
  78. const struct ipa_gsi_endpoint_data *data)
  79. {
  80. const struct ipa_gsi_endpoint_data *other_data;
  81. struct device *dev = &ipa->pdev->dev;
  82. enum ipa_endpoint_name other_name;
  83. if (ipa_gsi_endpoint_data_empty(data))
  84. return true;
  85. if (!data->toward_ipa) {
  86. const struct ipa_endpoint_rx *rx_config;
  87. const struct ipa_reg *reg;
  88. u32 buffer_size;
  89. u32 aggr_size;
  90. u32 limit;
  91. if (data->endpoint.filter_support) {
  92. dev_err(dev, "filtering not supported for "
  93. "RX endpoint %u\n",
  94. data->endpoint_id);
  95. return false;
  96. }
  97. /* Nothing more to check for non-AP RX */
  98. if (data->ee_id != GSI_EE_AP)
  99. return true;
  100. rx_config = &data->endpoint.config.rx;
  101. /* The buffer size must hold an MTU plus overhead */
  102. buffer_size = rx_config->buffer_size;
  103. limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
  104. if (buffer_size < limit) {
  105. dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
  106. data->endpoint_id, buffer_size, limit);
  107. return false;
  108. }
  109. if (!data->endpoint.config.aggregation) {
  110. bool result = true;
  111. /* No aggregation; check for bogus aggregation data */
  112. if (rx_config->aggr_time_limit) {
  113. dev_err(dev,
  114. "time limit with no aggregation for RX endpoint %u\n",
  115. data->endpoint_id);
  116. result = false;
  117. }
  118. if (rx_config->aggr_hard_limit) {
  119. dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
  120. data->endpoint_id);
  121. result = false;
  122. }
  123. if (rx_config->aggr_close_eof) {
  124. dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
  125. data->endpoint_id);
  126. result = false;
  127. }
  128. return result; /* Nothing more to check */
  129. }
  130. /* For an endpoint supporting receive aggregation, the byte
  131. * limit defines the point at which aggregation closes. This
  132. * check ensures the receive buffer size doesn't result in a
  133. * limit that exceeds what's representable in the aggregation
  134. * byte limit field.
  135. */
  136. aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
  137. rx_config->aggr_hard_limit);
  138. reg = ipa_reg(ipa, ENDP_INIT_AGGR);
  139. limit = ipa_reg_field_max(reg, BYTE_LIMIT);
  140. if (aggr_size > limit) {
  141. dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
  142. data->endpoint_id, aggr_size, limit);
  143. return false;
  144. }
  145. return true; /* Nothing more to check for RX */
  146. }
  147. /* Starting with IPA v4.5 sequencer replication is obsolete */
  148. if (ipa->version >= IPA_VERSION_4_5) {
  149. if (data->endpoint.config.tx.seq_rep_type) {
  150. dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
  151. data->endpoint_id);
  152. return false;
  153. }
  154. }
  155. if (data->endpoint.config.status_enable) {
  156. other_name = data->endpoint.config.tx.status_endpoint;
  157. if (other_name >= count) {
  158. dev_err(dev, "status endpoint name %u out of range "
  159. "for endpoint %u\n",
  160. other_name, data->endpoint_id);
  161. return false;
  162. }
  163. /* Status endpoint must be defined... */
  164. other_data = &all_data[other_name];
  165. if (ipa_gsi_endpoint_data_empty(other_data)) {
  166. dev_err(dev, "DMA endpoint name %u undefined "
  167. "for endpoint %u\n",
  168. other_name, data->endpoint_id);
  169. return false;
  170. }
  171. /* ...and has to be an RX endpoint... */
  172. if (other_data->toward_ipa) {
  173. dev_err(dev,
  174. "status endpoint for endpoint %u not RX\n",
  175. data->endpoint_id);
  176. return false;
  177. }
  178. /* ...and if it's to be an AP endpoint... */
  179. if (other_data->ee_id == GSI_EE_AP) {
  180. /* ...make sure it has status enabled. */
  181. if (!other_data->endpoint.config.status_enable) {
  182. dev_err(dev,
  183. "status not enabled for endpoint %u\n",
  184. other_data->endpoint_id);
  185. return false;
  186. }
  187. }
  188. }
  189. if (data->endpoint.config.dma_mode) {
  190. other_name = data->endpoint.config.dma_endpoint;
  191. if (other_name >= count) {
  192. dev_err(dev, "DMA endpoint name %u out of range "
  193. "for endpoint %u\n",
  194. other_name, data->endpoint_id);
  195. return false;
  196. }
  197. other_data = &all_data[other_name];
  198. if (ipa_gsi_endpoint_data_empty(other_data)) {
  199. dev_err(dev, "DMA endpoint name %u undefined "
  200. "for endpoint %u\n",
  201. other_name, data->endpoint_id);
  202. return false;
  203. }
  204. }
  205. return true;
  206. }
  207. static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
  208. const struct ipa_gsi_endpoint_data *data)
  209. {
  210. const struct ipa_gsi_endpoint_data *dp = data;
  211. struct device *dev = &ipa->pdev->dev;
  212. enum ipa_endpoint_name name;
  213. if (count > IPA_ENDPOINT_COUNT) {
  214. dev_err(dev, "too many endpoints specified (%u > %u)\n",
  215. count, IPA_ENDPOINT_COUNT);
  216. return false;
  217. }
  218. /* Make sure needed endpoints have defined data */
  219. if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
  220. dev_err(dev, "command TX endpoint not defined\n");
  221. return false;
  222. }
  223. if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
  224. dev_err(dev, "LAN RX endpoint not defined\n");
  225. return false;
  226. }
  227. if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
  228. dev_err(dev, "AP->modem TX endpoint not defined\n");
  229. return false;
  230. }
  231. if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
  232. dev_err(dev, "AP<-modem RX endpoint not defined\n");
  233. return false;
  234. }
  235. for (name = 0; name < count; name++, dp++)
  236. if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
  237. return false;
  238. return true;
  239. }
  240. /* Allocate a transaction to use on a non-command endpoint */
  241. static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
  242. u32 tre_count)
  243. {
  244. struct gsi *gsi = &endpoint->ipa->gsi;
  245. u32 channel_id = endpoint->channel_id;
  246. enum dma_data_direction direction;
  247. direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
  248. return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
  249. }
  250. /* suspend_delay represents suspend for RX, delay for TX endpoints.
  251. * Note that suspend is not supported starting with IPA v4.0, and
  252. * delay mode should not be used starting with IPA v4.2.
  253. */
  254. static bool
  255. ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
  256. {
  257. struct ipa *ipa = endpoint->ipa;
  258. const struct ipa_reg *reg;
  259. u32 field_id;
  260. u32 offset;
  261. bool state;
  262. u32 mask;
  263. u32 val;
  264. if (endpoint->toward_ipa)
  265. WARN_ON(ipa->version >= IPA_VERSION_4_2);
  266. else
  267. WARN_ON(ipa->version >= IPA_VERSION_4_0);
  268. reg = ipa_reg(ipa, ENDP_INIT_CTRL);
  269. offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
  270. val = ioread32(ipa->reg_virt + offset);
  271. field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
  272. mask = ipa_reg_bit(reg, field_id);
  273. state = !!(val & mask);
  274. /* Don't bother if it's already in the requested state */
  275. if (suspend_delay != state) {
  276. val ^= mask;
  277. iowrite32(val, ipa->reg_virt + offset);
  278. }
  279. return state;
  280. }
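/* Editor's sketch of the read-modify-write above: if the SUSPEND (or DELAY)
 * bit is currently clear and suspend_delay is true, the XOR sets the bit and
 * the function returns false (the previous state). A caller that later wants
 * to restore the original state can simply pass that return value back in,
 * as ipa_endpoint_reset_rx_aggr() does further down in this file.
 */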
  281. /* We don't care what the previous state was for delay mode */
  282. static void
  283. ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
  284. {
  285. /* Delay mode should not be used for IPA v4.2+ */
  286. WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
  287. WARN_ON(!endpoint->toward_ipa);
  288. (void)ipa_endpoint_init_ctrl(endpoint, enable);
  289. }
  290. static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
  291. {
  292. u32 mask = BIT(endpoint->endpoint_id);
  293. struct ipa *ipa = endpoint->ipa;
  294. const struct ipa_reg *reg;
  295. u32 val;
  296. WARN_ON(!(mask & ipa->available));
  297. reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
  298. val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
  299. return !!(val & mask);
  300. }
  301. static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
  302. {
  303. u32 mask = BIT(endpoint->endpoint_id);
  304. struct ipa *ipa = endpoint->ipa;
  305. const struct ipa_reg *reg;
  306. WARN_ON(!(mask & ipa->available));
  307. reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
  308. iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
  309. }
  310. /**
  311. * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
  312. * @endpoint: Endpoint on which to emulate a suspend
  313. *
  314. * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
  315. * with an open aggregation frame. This is to work around a hardware
  316. * issue in IPA version 3.5.1 where the suspend interrupt will not be
  317. * generated when it should be.
  318. */
  319. static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
  320. {
  321. struct ipa *ipa = endpoint->ipa;
  322. if (!endpoint->config.aggregation)
  323. return;
  324. /* Nothing to do if the endpoint doesn't have aggregation open */
  325. if (!ipa_endpoint_aggr_active(endpoint))
  326. return;
  327. /* Force close aggregation */
  328. ipa_endpoint_force_close(endpoint);
  329. ipa_interrupt_simulate_suspend(ipa->interrupt);
  330. }
  331. /* Returns previous suspend state (true means suspend was enabled) */
  332. static bool
  333. ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
  334. {
  335. bool suspended;
  336. if (endpoint->ipa->version >= IPA_VERSION_4_0)
  337. return enable; /* For IPA v4.0+, no change made */
  338. WARN_ON(endpoint->toward_ipa);
  339. suspended = ipa_endpoint_init_ctrl(endpoint, enable);
  340. /* A client suspended with an open aggregation frame will not
  341. * generate a SUSPEND IPA interrupt. If enabling suspend, have
  342. * ipa_endpoint_suspend_aggr() handle this.
  343. */
  344. if (enable && !suspended)
  345. ipa_endpoint_suspend_aggr(endpoint);
  346. return suspended;
  347. }
  348. /* Put all modem RX endpoints into suspend mode, and stop transmission
  349. * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
  350. * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
  351. * control instead.
  352. */
  353. void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
  354. {
  355. u32 endpoint_id;
  356. for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
  357. struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
  358. if (endpoint->ee_id != GSI_EE_MODEM)
  359. continue;
  360. if (!endpoint->toward_ipa)
  361. (void)ipa_endpoint_program_suspend(endpoint, enable);
  362. else if (ipa->version < IPA_VERSION_4_2)
  363. ipa_endpoint_program_delay(endpoint, enable);
  364. else
  365. gsi_modem_channel_flow_control(&ipa->gsi,
  366. endpoint->channel_id,
  367. enable);
  368. }
  369. }
  370. /* Reset all modem endpoints to use the default exception endpoint */
  371. int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
  372. {
  373. u32 initialized = ipa->initialized;
  374. struct gsi_trans *trans;
  375. u32 count;
  376. /* We need one command per modem TX endpoint, plus the commands
  377. * that clear the pipeline.
  378. */
  379. count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
  380. trans = ipa_cmd_trans_alloc(ipa, count);
  381. if (!trans) {
  382. dev_err(&ipa->pdev->dev,
  383. "no transaction to reset modem exception endpoints\n");
  384. return -EBUSY;
  385. }
  386. while (initialized) {
  387. u32 endpoint_id = __ffs(initialized);
  388. struct ipa_endpoint *endpoint;
  389. const struct ipa_reg *reg;
  390. u32 offset;
  391. initialized ^= BIT(endpoint_id);
  392. /* We only reset modem TX endpoints */
  393. endpoint = &ipa->endpoint[endpoint_id];
  394. if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
  395. continue;
  396. reg = ipa_reg(ipa, ENDP_STATUS);
  397. offset = ipa_reg_n_offset(reg, endpoint_id);
  398. /* Value written is 0, and all bits are updated. That
  399. * means status is disabled on the endpoint, and as a
  400. * result all other fields in the register are ignored.
  401. */
  402. ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
  403. }
  404. ipa_cmd_pipeline_clear_add(trans);
  405. gsi_trans_commit_wait(trans);
  406. ipa_cmd_pipeline_clear_wait(ipa);
  407. return 0;
  408. }
  409. static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
  410. {
  411. u32 endpoint_id = endpoint->endpoint_id;
  412. struct ipa *ipa = endpoint->ipa;
  413. enum ipa_cs_offload_en enabled;
  414. const struct ipa_reg *reg;
  415. u32 val = 0;
  416. reg = ipa_reg(ipa, ENDP_INIT_CFG);
  417. /* FRAG_OFFLOAD_EN is 0 */
  418. if (endpoint->config.checksum) {
  419. enum ipa_version version = ipa->version;
  420. if (endpoint->toward_ipa) {
  421. u32 off;
  422. /* Checksum header offset is in 4-byte units */
  423. off = sizeof(struct rmnet_map_header) / sizeof(u32);
  424. val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
  425. enabled = version < IPA_VERSION_4_5
  426. ? IPA_CS_OFFLOAD_UL
  427. : IPA_CS_OFFLOAD_INLINE;
  428. } else {
  429. enabled = version < IPA_VERSION_4_5
  430. ? IPA_CS_OFFLOAD_DL
  431. : IPA_CS_OFFLOAD_INLINE;
  432. }
  433. } else {
  434. enabled = IPA_CS_OFFLOAD_NONE;
  435. }
  436. val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
  437. /* CS_GEN_QMB_MASTER_SEL is 0 */
  438. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  439. }
  440. static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
  441. {
  442. u32 endpoint_id = endpoint->endpoint_id;
  443. struct ipa *ipa = endpoint->ipa;
  444. const struct ipa_reg *reg;
  445. u32 val;
  446. if (!endpoint->toward_ipa)
  447. return;
  448. reg = ipa_reg(ipa, ENDP_INIT_NAT);
  449. val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
  450. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  451. }
  452. static u32
  453. ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
  454. {
  455. u32 header_size = sizeof(struct rmnet_map_header);
  456. /* Without checksum offload, we just have the MAP header */
  457. if (!endpoint->config.checksum)
  458. return header_size;
  459. if (version < IPA_VERSION_4_5) {
  460. /* Checksum header inserted for AP TX endpoints only */
  461. if (endpoint->toward_ipa)
  462. header_size += sizeof(struct rmnet_map_ul_csum_header);
  463. } else {
  464. /* Checksum header is used in both directions */
  465. header_size += sizeof(struct rmnet_map_v5_csum_header);
  466. }
  467. return header_size;
  468. }
  469. /* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
  470. static u32 ipa_header_size_encode(enum ipa_version version,
  471. const struct ipa_reg *reg, u32 header_size)
  472. {
  473. u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
  474. u32 val;
  475. /* We know field_max can be used as a mask (2^n - 1) */
  476. val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
  477. if (version < IPA_VERSION_4_5) {
  478. WARN_ON(header_size > field_max);
  479. return val;
  480. }
  481. /* IPA v4.5 adds a few more most-significant bits */
  482. header_size >>= hweight32(field_max);
  483. WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
  484. val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
  485. return val;
  486. }
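/* Editor's example of the MSB split above, assuming (for illustration only)
 * a 6-bit HDR_LEN field so field_max = 63: a header size of 70 is encoded as
 * HDR_LEN = 70 & 63 = 6, and on IPA v4.5+ the remainder 70 >> 6 = 1 lands in
 * HDR_LEN_MSB. Before v4.5 a size over 63 would instead trigger the WARN_ON.
 */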
  487. /* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
  488. static u32 ipa_metadata_offset_encode(enum ipa_version version,
  489. const struct ipa_reg *reg, u32 offset)
  490. {
  491. u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
  492. u32 val;
  493. /* We know field_max can be used as a mask (2^n - 1) */
  494. val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
  495. if (version < IPA_VERSION_4_5) {
  496. WARN_ON(offset > field_max);
  497. return val;
  498. }
  499. /* IPA v4.5 adds a few more most-significant bits */
  500. offset >>= hweight32(field_max);
  501. WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
  502. val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
  503. return val;
  504. }
  505. /**
  506. * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
  507. * @endpoint: Endpoint pointer
  508. *
  509. * We program QMAP endpoints so each packet received is preceded by a QMAP
  510. * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
  511. * packet size field, and we have the IPA hardware populate both for each
  512. * received packet. The header is configured (in the HDR_EXT register)
  513. * to use big endian format.
  514. *
  515. * The packet size is written into the QMAP header's pkt_len field. That
  516. * location is defined here using the HDR_OFST_PKT_SIZE field.
  517. *
  518. * The mux_id comes from a 4-byte metadata value supplied with each packet
  519. * by the modem. It is *not* a QMAP header, but it does contain the mux_id
  520. * value that we want, in its low-order byte. A bitmask defined in the
  521. * endpoint's METADATA_MASK register defines which byte within the modem
  522. * metadata contains the mux_id. And the OFST_METADATA field programmed
  523. * here indicates where the extracted byte should be placed within the QMAP
  524. * header.
  525. */
  526. static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
  527. {
  528. u32 endpoint_id = endpoint->endpoint_id;
  529. struct ipa *ipa = endpoint->ipa;
  530. const struct ipa_reg *reg;
  531. u32 val = 0;
  532. reg = ipa_reg(ipa, ENDP_INIT_HDR);
  533. if (endpoint->config.qmap) {
  534. enum ipa_version version = ipa->version;
  535. size_t header_size;
  536. header_size = ipa_qmap_header_size(version, endpoint);
  537. val = ipa_header_size_encode(version, reg, header_size);
  538. /* Define how to fill fields in a received QMAP header */
  539. if (!endpoint->toward_ipa) {
  540. u32 off; /* Field offset within header */
  541. /* Where IPA will write the metadata value */
  542. off = offsetof(struct rmnet_map_header, mux_id);
  543. val |= ipa_metadata_offset_encode(version, reg, off);
  544. /* Where IPA will write the length */
  545. off = offsetof(struct rmnet_map_header, pkt_len);
  546. /* Upper bits are stored in HDR_EXT with IPA v4.5 */
  547. if (version >= IPA_VERSION_4_5)
  548. off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
  549. val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
  550. val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
  551. }
  552. /* For QMAP TX, metadata offset is 0 (modem assumes this) */
  553. val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
  554. /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
  555. /* HDR_A5_MUX is 0 */
  556. /* HDR_LEN_INC_DEAGG_HDR is 0 */
  557. /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
  558. }
  559. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  560. }
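/* Editor's note: with the struct rmnet_map_header layout in
 * <linux/if_rmnet.h> (a flags byte, then mux_id, then a 16-bit pkt_len), the
 * offsets programmed above for an RX QMAP endpoint work out to 1 for
 * OFST_METADATA (mux_id) and 2 for HDR_OFST_PKT_SIZE (pkt_len), with any
 * high-order offset bits moved to HDR_EXT on IPA v4.5+.
 */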
  561. static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
  562. {
  563. u32 pad_align = endpoint->config.rx.pad_align;
  564. u32 endpoint_id = endpoint->endpoint_id;
  565. struct ipa *ipa = endpoint->ipa;
  566. const struct ipa_reg *reg;
  567. u32 val = 0;
  568. reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
  569. if (endpoint->config.qmap) {
  570. /* We have a header, so we must specify its endianness */
  571. val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
  572. /* A QMAP header contains a 6 bit pad field at offset 0.
  573. * The RMNet driver assumes this field is meaningful in
  574. * packets it receives, and assumes the header's payload
  575. * length includes that padding. The RMNet driver does
  576. * *not* pad packets it sends, however, so the pad field
  577. * (although 0) should be ignored.
  578. */
  579. if (!endpoint->toward_ipa) {
  580. val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
  581. /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
  582. val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
  583. /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
  584. }
  585. }
  586. /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
  587. if (!endpoint->toward_ipa)
  588. val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
  589. /* IPA v4.5 adds some most-significant bits to a few fields,
  590. * two of which are defined in the HDR (not HDR_EXT) register.
  591. */
  592. if (ipa->version >= IPA_VERSION_4_5) {
  593. /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
  594. if (endpoint->config.qmap && !endpoint->toward_ipa) {
  595. u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
  596. u32 off; /* Field offset within header */
  597. off = offsetof(struct rmnet_map_header, pkt_len);
  598. /* Low bits are in the ENDP_INIT_HDR register */
  599. off >>= hweight32(mask);
  600. val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
  601. /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
  602. }
  603. }
  604. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  605. }
  606. static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
  607. {
  608. u32 endpoint_id = endpoint->endpoint_id;
  609. struct ipa *ipa = endpoint->ipa;
  610. const struct ipa_reg *reg;
  611. u32 val = 0;
  612. u32 offset;
  613. if (endpoint->toward_ipa)
  614. return; /* Register not valid for TX endpoints */
  615. reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
  616. offset = ipa_reg_n_offset(reg, endpoint_id);
  617. /* Note that HDR_ENDIANNESS indicates big endian header fields */
  618. if (endpoint->config.qmap)
  619. val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
  620. iowrite32(val, ipa->reg_virt + offset);
  621. }
  622. static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
  623. {
  624. struct ipa *ipa = endpoint->ipa;
  625. const struct ipa_reg *reg;
  626. u32 offset;
  627. u32 val;
  628. if (!endpoint->toward_ipa)
  629. return; /* Register not valid for RX endpoints */
  630. reg = ipa_reg(ipa, ENDP_INIT_MODE);
  631. if (endpoint->config.dma_mode) {
  632. enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
  633. u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
  634. val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
  635. val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
  636. } else {
  637. val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
  638. }
  639. /* All other bits unspecified (and 0) */
  640. offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
  641. iowrite32(val, ipa->reg_virt + offset);
  642. }
  643. /* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
  644. * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
  645. * they're configured to have granularity 100 usec and 1 msec, respectively.
  646. *
  647. * The return value is the positive or negative Qtime value to use to
  648. * express the (microsecond) time provided. A positive return value
  649. * means pulse generator 0 can be used; otherwise use pulse generator 1.
  650. */
  651. static int ipa_qtime_val(u32 microseconds, u32 max)
  652. {
  653. u32 val;
  654. /* Use 100 microsecond granularity if possible */
  655. val = DIV_ROUND_CLOSEST(microseconds, 100);
  656. if (val <= max)
  657. return (int)val;
  658. /* Have to use pulse generator 1 (millisecond granularity) */
  659. val = DIV_ROUND_CLOSEST(microseconds, 1000);
  660. WARN_ON(val > max);
  661. return (int)-val;
  662. }
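/* Editor's worked example, assuming a 5-bit limit field (max = 31):
 *
 *	ipa_qtime_val(500, 31)   returns  5    (5 * 100 usec, pulse generator 0)
 *	ipa_qtime_val(20000, 31) returns -20   (20 * 1 msec, pulse generator 1)
 *
 * Callers map a negative return to the *_GRAN_SEL bit, as the two encode
 * functions below do.
 */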
  663. /* Encode the aggregation timer limit (microseconds) based on IPA version */
  664. static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
  665. u32 microseconds)
  666. {
  667. u32 max;
  668. u32 val;
  669. if (!microseconds)
  670. return 0; /* Nothing to compute if time limit is 0 */
  671. max = ipa_reg_field_max(reg, TIME_LIMIT);
  672. if (ipa->version >= IPA_VERSION_4_5) {
  673. u32 gran_sel;
  674. int ret;
  675. /* Compute the Qtime limit value to use */
  676. ret = ipa_qtime_val(microseconds, max);
  677. if (ret < 0) {
  678. val = -ret;
  679. gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
  680. } else {
  681. val = ret;
  682. gran_sel = 0;
  683. }
  684. return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
  685. }
  686. /* We program aggregation granularity in ipa_hardware_config() */
  687. val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
  688. WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
  689. microseconds, max * IPA_AGGR_GRANULARITY);
  690. return ipa_reg_encode(reg, TIME_LIMIT, val);
  691. }
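/* Editor's example for the pre-v4.5 branch, assuming for illustration a
 * 500 microsecond IPA_AGGR_GRANULARITY: a 1000 usec limit encodes as
 * DIV_ROUND_CLOSEST(1000, 500) = 2 in TIME_LIMIT, with no granularity-select
 * bit involved.
 */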
  692. static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
  693. {
  694. u32 endpoint_id = endpoint->endpoint_id;
  695. struct ipa *ipa = endpoint->ipa;
  696. const struct ipa_reg *reg;
  697. u32 val = 0;
  698. reg = ipa_reg(ipa, ENDP_INIT_AGGR);
  699. if (endpoint->config.aggregation) {
  700. if (!endpoint->toward_ipa) {
  701. const struct ipa_endpoint_rx *rx_config;
  702. u32 buffer_size;
  703. u32 limit;
  704. rx_config = &endpoint->config.rx;
  705. val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
  706. val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
  707. buffer_size = rx_config->buffer_size;
  708. limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
  709. rx_config->aggr_hard_limit);
  710. val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
  711. limit = rx_config->aggr_time_limit;
  712. val |= aggr_time_limit_encode(ipa, reg, limit);
  713. /* AGGR_PKT_LIMIT is 0 (unlimited) */
  714. if (rx_config->aggr_close_eof)
  715. val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
  716. } else {
  717. val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
  718. val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
  719. /* other fields ignored */
  720. }
  721. /* AGGR_FORCE_CLOSE is 0 */
  722. /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
  723. } else {
  724. val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
  725. /* other fields ignored */
  726. }
  727. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  728. }
  729. /* The head-of-line blocking timer is defined as a tick count. For
  730. * IPA version 4.5 the tick count is based on the Qtimer, which is
  731. * derived from the 19.2 MHz SoC XO clock. For older IPA versions
  732. * each tick represents 128 cycles of the IPA core clock.
  733. *
  734. * Return the encoded value, representing the provided timeout period,
  735. * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
  736. */
  737. static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
  738. u32 microseconds)
  739. {
  740. u32 width;
  741. u32 scale;
  742. u64 ticks;
  743. u64 rate;
  744. u32 high;
  745. u32 val;
  746. if (!microseconds)
  747. return 0; /* Nothing to compute if timer period is 0 */
  748. if (ipa->version >= IPA_VERSION_4_5) {
  749. u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
  750. u32 gran_sel;
  751. int ret;
  752. /* Compute the Qtime limit value to use */
  753. ret = ipa_qtime_val(microseconds, max);
  754. if (ret < 0) {
  755. val = -ret;
  756. gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
  757. } else {
  758. val = ret;
  759. gran_sel = 0;
  760. }
  761. return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
  762. }
  763. /* Use 64 bit arithmetic to avoid overflow */
  764. rate = ipa_core_clock_rate(ipa);
  765. ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
  766. /* We still need the result to fit into the field */
  767. WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
  768. /* IPA v3.5.1 through v4.1 just record the tick count */
  769. if (ipa->version < IPA_VERSION_4_2)
  770. return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
  771. /* For IPA v4.2, the tick count is represented by base and
  772. * scale fields within the 32-bit timer register, where:
  773. * ticks = base << scale;
  774. * The best precision is achieved when the base value is as
  775. * large as possible. Find the highest set bit in the tick
  776. * count, and extract the number of bits in the base field
  777. * such that high bit is included.
  778. */
  779. high = fls(ticks); /* 1..32 (or warning above) */
  780. width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
  781. scale = high > width ? high - width : 0;
  782. if (scale) {
  783. /* If we're scaling, round up to get a closer result */
  784. ticks += 1 << (scale - 1);
  785. /* High bit was set, so rounding might have affected it */
  786. if (fls(ticks) != high)
  787. scale++;
  788. }
  789. val = ipa_reg_encode(reg, TIMER_SCALE, scale);
  790. val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
  791. return val;
  792. }
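/* Editor's worked example with assumed numbers (a 100 MHz core clock is used
 * purely for illustration): a 1000 usec timeout gives
 * ticks = DIV_ROUND_CLOSEST(1000 * 100000000, 128 * 1000000) = 781. Before
 * IPA v4.2 that value is written directly to TIMER_BASE_VALUE. On IPA v4.2,
 * if the base field were only 5 bits wide (again an assumption), fls(781) = 10
 * gives scale = 5, and the hardware reconstructs roughly base << scale ticks.
 */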
  793. /* If microseconds is 0, timeout is immediate */
  794. static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
  795. u32 microseconds)
  796. {
  797. u32 endpoint_id = endpoint->endpoint_id;
  798. struct ipa *ipa = endpoint->ipa;
  799. const struct ipa_reg *reg;
  800. u32 val;
  801. /* This should only be changed when HOL_BLOCK_EN is disabled */
  802. reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
  803. val = hol_block_timer_encode(ipa, reg, microseconds);
  804. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  805. }
  806. static void
  807. ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
  808. {
  809. u32 endpoint_id = endpoint->endpoint_id;
  810. struct ipa *ipa = endpoint->ipa;
  811. const struct ipa_reg *reg;
  812. u32 offset;
  813. u32 val;
  814. reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
  815. offset = ipa_reg_n_offset(reg, endpoint_id);
  816. val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
  817. iowrite32(val, ipa->reg_virt + offset);
  818. /* When enabling, the register must be written twice for IPA v4.5+ */
  819. if (enable && ipa->version >= IPA_VERSION_4_5)
  820. iowrite32(val, ipa->reg_virt + offset);
  821. }
  822. /* Assumes HOL_BLOCK is in disabled state */
  823. static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
  824. u32 microseconds)
  825. {
  826. ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
  827. ipa_endpoint_init_hol_block_en(endpoint, true);
  828. }
  829. static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
  830. {
  831. ipa_endpoint_init_hol_block_en(endpoint, false);
  832. }
  833. void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
  834. {
  835. u32 i;
  836. for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
  837. struct ipa_endpoint *endpoint = &ipa->endpoint[i];
  838. if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
  839. continue;
  840. ipa_endpoint_init_hol_block_disable(endpoint);
  841. ipa_endpoint_init_hol_block_enable(endpoint, 0);
  842. }
  843. }
  844. static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
  845. {
  846. u32 endpoint_id = endpoint->endpoint_id;
  847. struct ipa *ipa = endpoint->ipa;
  848. const struct ipa_reg *reg;
  849. u32 val = 0;
  850. if (!endpoint->toward_ipa)
  851. return; /* Register not valid for RX endpoints */
  852. reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
  853. /* DEAGGR_HDR_LEN is 0 */
  854. /* PACKET_OFFSET_VALID is 0 */
  855. /* PACKET_OFFSET_LOCATION is ignored (not valid) */
  856. /* MAX_PACKET_LEN is 0 (not enforced) */
  857. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  858. }
  859. static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
  860. {
  861. u32 resource_group = endpoint->config.resource_group;
  862. u32 endpoint_id = endpoint->endpoint_id;
  863. struct ipa *ipa = endpoint->ipa;
  864. const struct ipa_reg *reg;
  865. u32 val;
  866. reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
  867. val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
  868. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  869. }
  870. static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
  871. {
  872. u32 endpoint_id = endpoint->endpoint_id;
  873. struct ipa *ipa = endpoint->ipa;
  874. const struct ipa_reg *reg;
  875. u32 val;
  876. if (!endpoint->toward_ipa)
  877. return; /* Register not valid for RX endpoints */
  878. reg = ipa_reg(ipa, ENDP_INIT_SEQ);
  879. /* Low-order byte configures primary packet processing */
  880. val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
  881. /* Second byte (if supported) configures replicated packet processing */
  882. if (ipa->version < IPA_VERSION_4_5)
  883. val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
  884. endpoint->config.tx.seq_rep_type);
  885. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  886. }
  887. /**
  888. * ipa_endpoint_skb_tx() - Transmit a socket buffer
  889. * @endpoint: Endpoint pointer
  890. * @skb: Socket buffer to send
  891. *
  892. * Returns: 0 if successful, or a negative error code
  893. */
  894. int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
  895. {
  896. struct gsi_trans *trans;
  897. u32 nr_frags;
  898. int ret;
  899. /* Make sure source endpoint's TLV FIFO has enough entries to
  900. * hold the linear portion of the skb and all its fragments.
  901. * If not, see if we can linearize it before giving up.
  902. */
  903. nr_frags = skb_shinfo(skb)->nr_frags;
  904. if (nr_frags > endpoint->skb_frag_max) {
  905. if (skb_linearize(skb))
  906. return -E2BIG;
  907. nr_frags = 0;
  908. }
  909. trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
  910. if (!trans)
  911. return -EBUSY;
  912. ret = gsi_trans_skb_add(trans, skb);
  913. if (ret)
  914. goto err_trans_free;
  915. trans->data = skb; /* transaction owns skb now */
  916. gsi_trans_commit(trans, !netdev_xmit_more());
  917. return 0;
  918. err_trans_free:
  919. gsi_trans_free(trans);
  920. return -ENOMEM;
  921. }
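/* Editor's example of the linearize fallback above: if skb_frag_max were 3
 * (an assumed value) and an skb arrived with 5 fragments, skb_linearize()
 * collapses the data into the linear area, nr_frags is reset to 0, and the
 * transaction then needs only 1 + 0 TREs instead of 1 + 5.
 */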
  922. static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
  923. {
  924. u32 endpoint_id = endpoint->endpoint_id;
  925. struct ipa *ipa = endpoint->ipa;
  926. const struct ipa_reg *reg;
  927. u32 val = 0;
  928. reg = ipa_reg(ipa, ENDP_STATUS);
  929. if (endpoint->config.status_enable) {
  930. val |= ipa_reg_bit(reg, STATUS_EN);
  931. if (endpoint->toward_ipa) {
  932. enum ipa_endpoint_name name;
  933. u32 status_endpoint_id;
  934. name = endpoint->config.tx.status_endpoint;
  935. status_endpoint_id = ipa->name_map[name]->endpoint_id;
  936. val |= ipa_reg_encode(reg, STATUS_ENDP,
  937. status_endpoint_id);
  938. }
  939. /* STATUS_LOCATION is 0, meaning status element precedes
  940. * packet (not present for IPA v4.5+)
  941. */
  942. /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
  943. }
  944. iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
  945. }
  946. static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
  947. struct gsi_trans *trans)
  948. {
  949. struct page *page;
  950. u32 buffer_size;
  951. u32 offset;
  952. u32 len;
  953. int ret;
  954. buffer_size = endpoint->config.rx.buffer_size;
  955. page = dev_alloc_pages(get_order(buffer_size));
  956. if (!page)
  957. return -ENOMEM;
  958. /* Offset the buffer to make space for skb headroom */
  959. offset = NET_SKB_PAD;
  960. len = buffer_size - offset;
  961. ret = gsi_trans_page_add(trans, page, len, offset);
  962. if (ret)
  963. put_page(page);
  964. else
  965. trans->data = page; /* transaction owns page now */
  966. return ret;
  967. }
  968. /**
  969. * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  970. * @endpoint: Endpoint to be replenished
  971. *
  972. * The IPA hardware can hold a fixed number of receive buffers for an RX
  973. * endpoint, based on the number of entries in the underlying channel ring
  974. * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
  975. * more receive buffers can be supplied to the hardware. Replenishing for
  976. * an endpoint can be disabled, in which case buffers are not queued to
  977. * the hardware.
  978. */
  979. static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
  980. {
  981. struct gsi_trans *trans;
  982. if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
  983. return;
  984. /* Skip it if it's already active */
  985. if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
  986. return;
  987. while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
  988. bool doorbell;
  989. if (ipa_endpoint_replenish_one(endpoint, trans))
  990. goto try_again_later;
  991. /* Ring the doorbell if we've got a full batch */
  992. doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
  993. gsi_trans_commit(trans, doorbell);
  994. }
  995. clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
  996. return;
  997. try_again_later:
  998. gsi_trans_free(trans);
  999. clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
  1000. /* Whenever a receive buffer transaction completes we'll try to
  1001. * replenish again. It's unlikely, but if we fail to supply even
  1002. * one buffer, nothing will trigger another replenish attempt.
  1003. * If the hardware has no receive buffers queued, schedule work to
  1004. * try replenishing again.
  1005. */
  1006. if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
  1007. schedule_delayed_work(&endpoint->replenish_work,
  1008. msecs_to_jiffies(1));
  1009. }
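/* Editor's note on the batching above: with IPA_REPLENISH_BATCH defined as 16,
 * the doorbell is rung only on every 16th queued buffer (whenever
 * replenish_count becomes a multiple of 16); the buffers in between are queued
 * without notifying the hardware, which is the point of batching.
 */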
  1010. static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
  1011. {
  1012. set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
  1013. /* Start replenishing if hardware currently has no buffers */
  1014. if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
  1015. ipa_endpoint_replenish(endpoint);
  1016. }
  1017. static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
  1018. {
  1019. clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
  1020. }
  1021. static void ipa_endpoint_replenish_work(struct work_struct *work)
  1022. {
  1023. struct delayed_work *dwork = to_delayed_work(work);
  1024. struct ipa_endpoint *endpoint;
  1025. endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
  1026. ipa_endpoint_replenish(endpoint);
  1027. }
  1028. static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
  1029. void *data, u32 len, u32 extra)
  1030. {
  1031. struct sk_buff *skb;
  1032. if (!endpoint->netdev)
  1033. return;
  1034. skb = __dev_alloc_skb(len, GFP_ATOMIC);
  1035. if (skb) {
  1036. /* Copy the data into the socket buffer and receive it */
  1037. skb_put(skb, len);
  1038. memcpy(skb->data, data, len);
  1039. skb->truesize += extra;
  1040. }
  1041. ipa_modem_skb_rx(endpoint->netdev, skb);
  1042. }
  1043. static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
  1044. struct page *page, u32 len)
  1045. {
  1046. u32 buffer_size = endpoint->config.rx.buffer_size;
  1047. struct sk_buff *skb;
  1048. /* Nothing to do if there's no netdev */
  1049. if (!endpoint->netdev)
  1050. return false;
  1051. WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
  1052. skb = build_skb(page_address(page), buffer_size);
  1053. if (skb) {
  1054. /* Reserve the headroom and account for the data */
  1055. skb_reserve(skb, NET_SKB_PAD);
  1056. skb_put(skb, len);
  1057. }
  1058. /* Receive the buffer (or record drop if unable to build it) */
  1059. ipa_modem_skb_rx(endpoint->netdev, skb);
  1060. return skb != NULL;
  1061. }
  1062. /* The format of a packet status element is the same for several status
  1063. * types (opcodes). Other types aren't currently supported.
  1064. */
  1065. static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
  1066. {
  1067. switch (opcode) {
  1068. case IPA_STATUS_OPCODE_PACKET:
  1069. case IPA_STATUS_OPCODE_DROPPED_PACKET:
  1070. case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
  1071. case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
  1072. return true;
  1073. default:
  1074. return false;
  1075. }
  1076. }
  1077. static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
  1078. const struct ipa_status *status)
  1079. {
  1080. u32 endpoint_id;
  1081. if (!ipa_status_format_packet(status->opcode))
  1082. return true;
  1083. if (!status->pkt_len)
  1084. return true;
  1085. endpoint_id = u8_get_bits(status->endp_dst_idx,
  1086. IPA_STATUS_DST_IDX_FMASK);
  1087. if (endpoint_id != endpoint->endpoint_id)
  1088. return true;
  1089. return false; /* Don't skip this packet, process it */
  1090. }
  1091. static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
  1092. const struct ipa_status *status)
  1093. {
  1094. struct ipa_endpoint *command_endpoint;
  1095. struct ipa *ipa = endpoint->ipa;
  1096. u32 endpoint_id;
  1097. if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
  1098. return false; /* No valid tag */
  1099. /* The status contains a valid tag. We know the packet was sent to
  1100. * this endpoint (already verified by ipa_endpoint_status_skip()).
  1101. * If the packet came from the AP->command TX endpoint we know
  1102. * this packet was sent as part of the pipeline clear process.
  1103. */
  1104. endpoint_id = u8_get_bits(status->endp_src_idx,
  1105. IPA_STATUS_SRC_IDX_FMASK);
  1106. command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
  1107. if (endpoint_id == command_endpoint->endpoint_id) {
  1108. complete(&ipa->completion);
  1109. } else {
  1110. dev_err(&ipa->pdev->dev,
  1111. "unexpected tagged packet from endpoint %u\n",
  1112. endpoint_id);
  1113. }
  1114. return true;
  1115. }
  1116. /* Return whether the status indicates the packet should be dropped */
  1117. static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
  1118. const struct ipa_status *status)
  1119. {
  1120. u32 val;
  1121. /* If the status indicates a tagged transfer, we'll drop the packet */
  1122. if (ipa_endpoint_status_tag(endpoint, status))
  1123. return true;
  1124. /* Deaggregation exceptions we drop; all other types we consume */
  1125. if (status->exception)
  1126. return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
  1127. /* Drop the packet if it fails to match a routing rule; otherwise no */
  1128. val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
  1129. return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
  1130. }
  1131. static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
  1132. struct page *page, u32 total_len)
  1133. {
  1134. u32 buffer_size = endpoint->config.rx.buffer_size;
  1135. void *data = page_address(page) + NET_SKB_PAD;
  1136. u32 unused = buffer_size - total_len;
  1137. u32 resid = total_len;
  1138. while (resid) {
  1139. const struct ipa_status *status = data;
  1140. u32 align;
  1141. u32 len;
  1142. if (resid < sizeof(*status)) {
  1143. dev_err(&endpoint->ipa->pdev->dev,
  1144. "short message (%u bytes < %zu byte status)\n",
  1145. resid, sizeof(*status));
  1146. break;
  1147. }
  1148. /* Skip over status packets that lack packet data */
  1149. if (ipa_endpoint_status_skip(endpoint, status)) {
  1150. data += sizeof(*status);
  1151. resid -= sizeof(*status);
  1152. continue;
  1153. }
  1154. /* Compute the amount of buffer space consumed by the packet,
  1155. * including the status element. If the hardware is configured
  1156. * to pad packet data to an aligned boundary, account for that.
  1157. * And if checksum offload is enabled a trailer containing
  1158. * computed checksum information will be appended.
  1159. */
  1160. align = endpoint->config.rx.pad_align ? : 1;
  1161. len = le16_to_cpu(status->pkt_len);
  1162. len = sizeof(*status) + ALIGN(len, align);
  1163. if (endpoint->config.checksum)
  1164. len += sizeof(struct rmnet_map_dl_csum_trailer);
  1165. if (!ipa_endpoint_status_drop(endpoint, status)) {
  1166. void *data2;
  1167. u32 extra;
  1168. u32 len2;
  1169. /* Client receives only packet data (no status) */
  1170. data2 = data + sizeof(*status);
  1171. len2 = le16_to_cpu(status->pkt_len);
  1172. /* Have the true size reflect the extra unused space in
  1173. * the original receive buffer. Distribute the "cost"
  1174. * proportionately across all aggregated packets in the
  1175. * buffer.
  1176. */
  1177. extra = DIV_ROUND_CLOSEST(unused * len, total_len);
  1178. ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
  1179. }
  1180. /* Consume status and the full packet it describes */
  1181. data += len;
  1182. resid -= len;
  1183. }
  1184. }
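/* Editor's worked example with assumed numbers: struct ipa_status above is
 * 32 bytes, so a status element describing a 1400-byte packet with pad_align 0
 * and no checksum offload consumes len = 32 + 1400 = 1432 bytes of the buffer.
 * If total_len were 4096 in an 8192-byte buffer, each delivered packet's
 * truesize is inflated by extra = DIV_ROUND_CLOSEST((8192 - 4096) * len, 4096),
 * about 1432 for this packet, spreading the unused half of the buffer across
 * the aggregated packets in proportion to their size.
 */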
  1185. void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
  1186. struct gsi_trans *trans)
  1187. {
  1188. struct page *page;
  1189. if (endpoint->toward_ipa)
  1190. return;
  1191. if (trans->cancelled)
  1192. goto done;
  1193. /* Parse or build a socket buffer using the actual received length */
  1194. page = trans->data;
  1195. if (endpoint->config.status_enable)
  1196. ipa_endpoint_status_parse(endpoint, page, trans->len);
  1197. else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
  1198. trans->data = NULL; /* Pages have been consumed */
  1199. done:
  1200. ipa_endpoint_replenish(endpoint);
  1201. }
  1202. void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
  1203. struct gsi_trans *trans)
  1204. {
  1205. if (endpoint->toward_ipa) {
  1206. struct ipa *ipa = endpoint->ipa;
  1207. /* Nothing to do for command transactions */
  1208. if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
  1209. struct sk_buff *skb = trans->data;
  1210. if (skb)
  1211. dev_kfree_skb_any(skb);
  1212. }
  1213. } else {
  1214. struct page *page = trans->data;
  1215. if (page)
  1216. put_page(page);
  1217. }
  1218. }
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	const struct ipa_reg *reg;
	u32 val;

	reg = ipa_reg(ipa, ROUTE);
	/* ROUTE_DIS is 0 */
	val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
	val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
	/* ROUTE_DEF_HDR_OFST is 0 */
	val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
	val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);

	iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}
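
/* Reset an endpoint's underlying GSI channel.  RX endpoints with
 * aggregation active need the special recovery sequence above; all
 * other cases are a simple channel reset.
 */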
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
			endpoint->config.aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}
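
/* Program all endpoint-specific IPA registers from the endpoint's
 * configuration data.  Called when the endpoint is set up (see
 * ipa_endpoint_setup_one() below), before its channel is started.
 */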
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
		(void)ipa_endpoint_program_suspend(endpoint, false);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_nat(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa) {
		if (endpoint->config.rx.holb_drop)
			ipa_endpoint_init_hol_block_enable(endpoint, 0);
		else
			ipa_endpoint_init_hol_block_disable(endpoint);
	}
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}
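
/* Enable an endpoint: start its GSI channel and, for RX endpoints,
 * enable the SUSPEND interrupt and start replenishing receive buffers.
 */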
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}
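
/* Disable an endpoint: stop replenishing and disable the SUSPEND
 * interrupt for RX endpoints, then stop the underlying GSI channel.
 * Does nothing if the endpoint is not currently enabled.
 */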
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}
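
/* Suspend a single enabled endpoint: quiesce RX (stop replenishing and
 * put the endpoint in suspend mode), then suspend its GSI channel.
 */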
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}
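
/* System suspend/resume for the AP-owned endpoints.  The modem network
 * device (if any) is suspended first and resumed last; the LAN RX and
 * command TX endpoints are handled here directly.
 */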
void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}
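
/* One-time setup of an AP endpoint: record the per-channel TRE limit,
 * initialize RX replenish state, and program the endpoint's registers.
 */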
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}
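
/* Determine which endpoints the hardware supports (from the FLAVOR_0
 * register on IPA v3.5+) and validate the initialized endpoints against
 * that set.  As an illustration (values are hypothetical): if
 * PROD_LOWEST is 8 and MAX_PROD_PIPES is 4, RX endpoints are 8..11 and
 * rx_mask = GENMASK(11, 8); if MAX_CONS_PIPES is 8, TX endpoints are
 * 0..7 and tx_mask = GENMASK(7, 0).
 */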
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_reg *reg;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = ~0;
		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	reg = ipa_reg(ipa, FLAVOR_0);
	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));

	/* Our RX is an IPA producer */
	rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
	max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}
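
/* Initialize one endpoint from its configuration data: record it in
 * the channel and name maps, fill in its basic identity and config,
 * and mark it initialized.
 */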
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->config = data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}
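
/* Inverse of ipa_endpoint_init(): tear down all initialized endpoints
 * and clear the name and channel maps.
 */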
void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
			ipa->modem_tx_count++;
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}