/* qcom_q6v5_mss.c — web-scraped copy; the HTML viewer's line-number gutter was removed */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Qualcomm self-authenticating modem subsystem remoteproc driver
  4. *
  5. * Copyright (C) 2016 Linaro Ltd.
  6. * Copyright (C) 2014 Sony Mobile Communications AB
  7. * Copyright (c) 2012-2013, 2020-2021 The Linux Foundation. All rights reserved.
  8. */
  9. #include <linux/clk.h>
  10. #include <linux/delay.h>
  11. #include <linux/devcoredump.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/kernel.h>
  15. #include <linux/mfd/syscon.h>
  16. #include <linux/module.h>
  17. #include <linux/of_address.h>
  18. #include <linux/of_device.h>
  19. #include <linux/of_reserved_mem.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/pm_domain.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/regmap.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/remoteproc.h>
  26. #include <linux/reset.h>
  27. #include <linux/soc/qcom/mdt_loader.h>
  28. #include <linux/iopoll.h>
  29. #include <linux/slab.h>
  30. #include "remoteproc_internal.h"
  31. #include "qcom_common.h"
  32. #include "qcom_pil_info.h"
  33. #include "qcom_q6v5.h"
  34. #include <linux/qcom_scm.h>
/* SMEM item id in which the modem posts its crash reason string */
#define MPSS_CRASH_REASON_SMEM		421

/* Amount of the MBA region copied out by q6v5_dump_mba_logs() */
#define MBA_LOG_SIZE			SZ_4K

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18
#define RMB_MBA_MSS_STATUS		0x40
#define RMB_MBA_ALT_RESET		0x44

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6V6SS_MEM_PWR_CTL		0x034
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define BOOT_FSM_TIMEOUT		10000
/**
 * struct qcom_mss_reg_res - requirements for one regulator supply
 * @supply: regulator supply name, NULL entry terminates a table
 * @uV: voltage to request while enabled (only applied when > 0)
 * @uA: load to request while enabled (only applied when > 0)
 */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};
/**
 * struct rproc_hexagon_res - per-SoC match data for the MSS driver
 * @hexagon_mba_image: default MBA firmware image name
 * @proxy_supply: table of proxy regulator requirements (NULL-terminated)
 * @active_supply: table of active regulator requirements (NULL-terminated)
 * @proxy_clk_names: names of the proxy clocks
 * @reset_clk_names: names of clocks needed around reset
 * @active_clk_names: names of clocks held while the modem runs
 * @active_pd_names: names of the active power domains
 * @proxy_pd_names: names of the proxy power domains
 * @version: one of the MSS_* enum values
 * @need_mem_protection: assign carveouts to the modem VM via SCM calls
 * @has_alt_reset: use the RMB_MBA_ALT_RESET flow (see q6v5_reset_deassert())
 * @has_mba_logs: MBA region contains logs worth coredumping on failure
 * @has_spare_reg: apply the AXI_GATING_VALID_OVERRIDE reset workaround
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
};
/* Driver state for one modem subsystem (MSS) instance. */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register space */
	void __iomem *rmb_base;		/* RMB (PBL/MBA mailbox) register space */

	struct regmap *halt_map;	/* syscon holding the AXI halt registers */
	struct regmap *conn_map;	/* syscon with AXI_GATING_VALID_OVERRIDE */

	u32 halt_q6;			/* per-port offsets into halt_map */
	u32 halt_modem;
	u32 halt_nc;
	u32 conn_box;			/* offset into conn_map */

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;		/* shared q6v5 helper state (qcom_q6v5.h) */

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool dump_mba_loaded;		/* MBA kept loaded for post-mortem dump */
	size_t current_dump_size;
	size_t total_dump_size;

	phys_addr_t mba_phys;		/* physical base of the MBA carveout */
	void *mba_region;		/* CPU mapping of the MBA carveout */
	size_t mba_size;
	size_t dp_size;			/* size of the optional "msadp" policy blob */

	phys_addr_t mdata_phys;		/* optional dedicated metadata carveout */
	size_t mdata_size;

	phys_addr_t mpss_phys;		/* physical base of the MPSS carveout */
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;

	bool need_mem_protection;	/* gate for q6v5_xfer_mem_ownership() */
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	bool has_qaccept_regs;		/* NOTE(review): not referenced in this chunk */
	bool has_ext_cntl_regs;		/* NOTE(review): not referenced in this chunk */
	bool has_vq6;			/* NOTE(review): not referenced in this chunk */

	u64 mpss_perm;			/* current SCM VM permission bitmap, MPSS */
	u64 mba_perm;			/* current SCM VM permission bitmap, MBA */
	const char *hexagon_mdt_image;	/* MPSS firmware (mdt) name */
	int version;			/* one of the MSS_* enum values */
};
/* SoC generations with distinct boot/reset sequences; see q6v5proc_reset() */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};
  186. static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
  187. const struct qcom_mss_reg_res *reg_res)
  188. {
  189. int rc;
  190. int i;
  191. if (!reg_res)
  192. return 0;
  193. for (i = 0; reg_res[i].supply; i++) {
  194. regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
  195. if (IS_ERR(regs[i].reg)) {
  196. rc = PTR_ERR(regs[i].reg);
  197. if (rc != -EPROBE_DEFER)
  198. dev_err(dev, "Failed to get %s\n regulator",
  199. reg_res[i].supply);
  200. return rc;
  201. }
  202. regs[i].uV = reg_res[i].uV;
  203. regs[i].uA = reg_res[i].uA;
  204. }
  205. return i;
  206. }
  207. static int q6v5_regulator_enable(struct q6v5 *qproc,
  208. struct reg_info *regs, int count)
  209. {
  210. int ret;
  211. int i;
  212. for (i = 0; i < count; i++) {
  213. if (regs[i].uV > 0) {
  214. ret = regulator_set_voltage(regs[i].reg,
  215. regs[i].uV, INT_MAX);
  216. if (ret) {
  217. dev_err(qproc->dev,
  218. "Failed to request voltage for %d.\n",
  219. i);
  220. goto err;
  221. }
  222. }
  223. if (regs[i].uA > 0) {
  224. ret = regulator_set_load(regs[i].reg,
  225. regs[i].uA);
  226. if (ret < 0) {
  227. dev_err(qproc->dev,
  228. "Failed to set regulator mode\n");
  229. goto err;
  230. }
  231. }
  232. ret = regulator_enable(regs[i].reg);
  233. if (ret) {
  234. dev_err(qproc->dev, "Regulator enable failed\n");
  235. goto err;
  236. }
  237. }
  238. return 0;
  239. err:
  240. for (; i >= 0; i--) {
  241. if (regs[i].uV > 0)
  242. regulator_set_voltage(regs[i].reg, 0, INT_MAX);
  243. if (regs[i].uA > 0)
  244. regulator_set_load(regs[i].reg, 0);
  245. regulator_disable(regs[i].reg);
  246. }
  247. return ret;
  248. }
  249. static void q6v5_regulator_disable(struct q6v5 *qproc,
  250. struct reg_info *regs, int count)
  251. {
  252. int i;
  253. for (i = 0; i < count; i++) {
  254. if (regs[i].uV > 0)
  255. regulator_set_voltage(regs[i].reg, 0, INT_MAX);
  256. if (regs[i].uA > 0)
  257. regulator_set_load(regs[i].reg, 0);
  258. regulator_disable(regs[i].reg);
  259. }
  260. }
  261. static int q6v5_clk_enable(struct device *dev,
  262. struct clk **clks, int count)
  263. {
  264. int rc;
  265. int i;
  266. for (i = 0; i < count; i++) {
  267. rc = clk_prepare_enable(clks[i]);
  268. if (rc) {
  269. dev_err(dev, "Clock enable failed\n");
  270. goto err;
  271. }
  272. }
  273. return 0;
  274. err:
  275. for (i--; i >= 0; i--)
  276. clk_disable_unprepare(clks[i]);
  277. return rc;
  278. }
  279. static void q6v5_clk_disable(struct device *dev,
  280. struct clk **clks, int count)
  281. {
  282. int i;
  283. for (i = 0; i < count; i++)
  284. clk_disable_unprepare(clks[i]);
  285. }
  286. static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
  287. size_t pd_count)
  288. {
  289. int ret;
  290. int i;
  291. for (i = 0; i < pd_count; i++) {
  292. dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
  293. ret = pm_runtime_get_sync(pds[i]);
  294. if (ret < 0) {
  295. pm_runtime_put_noidle(pds[i]);
  296. dev_pm_genpd_set_performance_state(pds[i], 0);
  297. goto unroll_pd_votes;
  298. }
  299. }
  300. return 0;
  301. unroll_pd_votes:
  302. for (i--; i >= 0; i--) {
  303. dev_pm_genpd_set_performance_state(pds[i], 0);
  304. pm_runtime_put(pds[i]);
  305. }
  306. return ret;
  307. }
  308. static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
  309. size_t pd_count)
  310. {
  311. int i;
  312. for (i = 0; i < pd_count; i++) {
  313. dev_pm_genpd_set_performance_state(pds[i], 0);
  314. pm_runtime_put(pds[i]);
  315. }
  316. }
  317. static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm,
  318. bool local, bool remote, phys_addr_t addr,
  319. size_t size)
  320. {
  321. struct qcom_scm_vmperm next[2];
  322. int perms = 0;
  323. if (!qproc->need_mem_protection)
  324. return 0;
  325. if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
  326. remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
  327. return 0;
  328. if (local) {
  329. next[perms].vmid = QCOM_SCM_VMID_HLOS;
  330. next[perms].perm = QCOM_SCM_PERM_RWX;
  331. perms++;
  332. }
  333. if (remote) {
  334. next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
  335. next[perms].perm = QCOM_SCM_PERM_RW;
  336. perms++;
  337. }
  338. return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
  339. current_perm, next, perms);
  340. }
  341. static void q6v5_debug_policy_load(struct q6v5 *qproc)
  342. {
  343. const struct firmware *dp_fw;
  344. if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
  345. return;
  346. if (SZ_1M + dp_fw->size <= qproc->mba_size) {
  347. memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size);
  348. qproc->dp_size = dp_fw->size;
  349. }
  350. release_firmware(dp_fw);
  351. }
  352. static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
  353. {
  354. struct q6v5 *qproc = rproc->priv;
  355. /* MBA is restricted to a maximum size of 1M */
  356. if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
  357. dev_err(qproc->dev, "MBA firmware load failed\n");
  358. return -EINVAL;
  359. }
  360. memcpy(qproc->mba_region, fw->data, fw->size);
  361. q6v5_debug_policy_load(qproc);
  362. return 0;
  363. }
/*
 * q6v5_reset_assert() - put the modem subsystem into reset
 *
 * Three flavours, selected by match data:
 *  - alt-reset: pulse the MSS restart while the PDC reset is held;
 *  - spare-reg: glitch-free sequence described below (note that this
 *    path leaves mss_restart deasserted again on exit);
 *  - default: plain assert of the MSS restart line.
 */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
		 * is withdrawn post MSS assert followed by a MSS deassert,
		 * while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}
/*
 * q6v5_reset_deassert() - release the modem subsystem from reset
 *
 * On alt-reset SoCs the RMB_MBA_ALT_RESET register is toggled around an
 * MSS restart pulse while the PDC reset is held; spare-reg SoCs pulse the
 * MSS restart; otherwise the MSS restart line is simply deasserted.
 */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}
  410. static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
  411. {
  412. unsigned long timeout;
  413. s32 val;
  414. timeout = jiffies + msecs_to_jiffies(ms);
  415. for (;;) {
  416. val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
  417. if (val)
  418. break;
  419. if (time_after(jiffies, timeout))
  420. return -ETIMEDOUT;
  421. msleep(1);
  422. }
  423. return val;
  424. }
  425. static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
  426. {
  427. unsigned long timeout;
  428. s32 val;
  429. timeout = jiffies + msecs_to_jiffies(ms);
  430. for (;;) {
  431. val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
  432. if (val < 0)
  433. break;
  434. if (!status && val)
  435. break;
  436. else if (status && val == status)
  437. break;
  438. if (time_after(jiffies, timeout))
  439. return -ETIMEDOUT;
  440. msleep(1);
  441. }
  442. return val;
  443. }
  444. static void q6v5_dump_mba_logs(struct q6v5 *qproc)
  445. {
  446. struct rproc *rproc = qproc->rproc;
  447. void *data;
  448. if (!qproc->has_mba_logs)
  449. return;
  450. if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
  451. qproc->mba_size))
  452. return;
  453. data = vmalloc(MBA_LOG_SIZE);
  454. if (!data)
  455. return;
  456. memcpy(data, qproc->mba_region, MBA_LOG_SIZE);
  457. dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
  458. }
/*
 * q6v5proc_reset() - run the version-specific Hexagon power-up sequence
 * @qproc: driver state
 *
 * SDM845 and SC7180 use a hardware boot FSM; MSM8996/MSM8998 use the
 * QDSP6v56 BHS/LDO/memory power sequence; everything else uses the older
 * QDSS sequence. All paths end by waiting for the PBL to post a status
 * over the RMB.
 *
 * Return: 0 on success, -ETIMEDOUT/-EINVAL or a poll error on failure.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the QDSP6SS sleep clock and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* BIT(0) of MSS_STATUS is set once the boot FSM completes */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		/* Enable the QDSP6SS sleep clock and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to make sure the write landed before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
/*
 * q6v5proc_halt_axi_port() - halt transactions on one modem AXI port
 * @qproc: driver state (for logging)
 * @halt_map: syscon regmap containing the halt registers
 * @offset: base offset of this port's halt register block
 *
 * Requests a halt and polls for the acknowledge; the port itself stays
 * halted until the next MSS reset — only the request bit is cleared here.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
/*
 * q6v5_mpss_init_image() - hand the MPSS metadata to the MBA for
 * authentication
 * @qproc: driver state
 * @fw: the MPSS .mdt firmware
 *
 * Stages the metadata (ELF headers + hash segment) either in a dedicated
 * carveout (mdata_phys) or in a freshly allocated DMA buffer, assigns it
 * to the modem VM via SCM, and writes its address to the RMB so the MBA
 * can authenticate it. CPU access is reclaimed afterwards regardless of
 * the authentication result.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	u64 mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(qproc->dev, fw, qproc->hexagon_mdt_image,
					  &size, false, NULL);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	if (qproc->mdata_phys) {
		/* Dedicated metadata carveout: map it and bounds-check */
		if (size > qproc->mdata_size) {
			ret = -EINVAL;
			dev_err(qproc->dev, "metadata size outside memory range\n");
			goto free_metadata;
		}

		phys = qproc->mdata_phys;
		ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
		if (!ptr) {
			ret = -EBUSY;
			dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
				&qproc->mdata_phys, size);
			goto free_metadata;
		}
	} else {
		/* No carveout: stage the metadata in a DMA buffer instead */
		ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
		if (!ptr) {
			ret = -ENOMEM;
			dev_err(qproc->dev, "failed to allocate mdt buffer\n");
			goto free_metadata;
		}
	}

	memcpy(ptr, metadata, size);

	/* The temporary mapping is only needed for the copy above */
	if (qproc->mdata_phys)
		memunmap(ptr);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata lives and wait for authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	if (!qproc->mdata_phys)
		dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
free_metadata:
	kfree(metadata);

	return ret < 0 ? ret : 0;
}
  738. static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
  739. {
  740. if (phdr->p_type != PT_LOAD)
  741. return false;
  742. if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
  743. return false;
  744. if (!phdr->p_memsz)
  745. return false;
  746. return true;
  747. }
/*
 * q6v5_mba_load() - power up the Hexagon and boot the Modem Boot Authenticator
 * @qproc: driver instance
 *
 * Brings up power domains, regulators, clocks and resets in the required
 * order, grants the Q6 access to the MPSS and MBA regions, then releases the
 * core from reset and waits for the MBA to report it has booted.  On failure
 * the acquired resources are released in strict reverse order via the goto
 * ladder below.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;
	bool mba_load_err = false;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/*
	 * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide
	 * the Q6 access to this region.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell the RMB where the MBA image (and optional debug policy) lives */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
	if (qproc->dp_size) {
		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	if (qproc->has_mba_logs)
		qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	mba_load_err = true;
reclaim_mba:
	/* Always try to take the MBA region back before powering down */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		/* Region is ours again; dumping the MBA log is safe */
		q6v5_dump_mba_logs(qproc);
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
/*
 * q6v5_mba_reclaim() - halt the Hexagon and release all boot-time resources
 * @qproc: driver instance
 *
 * Halts the AXI ports, asserts reset and tears down clocks, regulators and
 * power domains in reverse of the q6v5_mba_load() bring-up order, then takes
 * back ownership of the MBA region from the modem VM.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	qproc->dp_size = 0;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		/* Handover never happened; release the proxy resources here */
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
  922. static int q6v5_reload_mba(struct rproc *rproc)
  923. {
  924. struct q6v5 *qproc = rproc->priv;
  925. const struct firmware *fw;
  926. int ret;
  927. ret = request_firmware(&fw, rproc->firmware, qproc->dev);
  928. if (ret < 0)
  929. return ret;
  930. q6v5_load(rproc, fw);
  931. ret = q6v5_mba_load(qproc);
  932. release_firmware(fw);
  933. return ret;
  934. }
  935. static int q6v5_mpss_load(struct q6v5 *qproc)
  936. {
  937. const struct elf32_phdr *phdrs;
  938. const struct elf32_phdr *phdr;
  939. const struct firmware *seg_fw;
  940. const struct firmware *fw;
  941. struct elf32_hdr *ehdr;
  942. phys_addr_t mpss_reloc;
  943. phys_addr_t boot_addr;
  944. phys_addr_t min_addr = PHYS_ADDR_MAX;
  945. phys_addr_t max_addr = 0;
  946. u32 code_length;
  947. bool relocate = false;
  948. char *fw_name;
  949. size_t fw_name_len;
  950. ssize_t offset;
  951. size_t size = 0;
  952. void *ptr;
  953. int ret;
  954. int i;
  955. fw_name_len = strlen(qproc->hexagon_mdt_image);
  956. if (fw_name_len <= 4)
  957. return -EINVAL;
  958. fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
  959. if (!fw_name)
  960. return -ENOMEM;
  961. ret = request_firmware(&fw, fw_name, qproc->dev);
  962. if (ret < 0) {
  963. dev_err(qproc->dev, "unable to load %s\n", fw_name);
  964. goto out;
  965. }
  966. /* Initialize the RMB validator */
  967. writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
  968. ret = q6v5_mpss_init_image(qproc, fw);
  969. if (ret)
  970. goto release_firmware;
  971. ehdr = (struct elf32_hdr *)fw->data;
  972. phdrs = (struct elf32_phdr *)(ehdr + 1);
  973. for (i = 0; i < ehdr->e_phnum; i++) {
  974. phdr = &phdrs[i];
  975. if (!q6v5_phdr_valid(phdr))
  976. continue;
  977. if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
  978. relocate = true;
  979. if (phdr->p_paddr < min_addr)
  980. min_addr = phdr->p_paddr;
  981. if (phdr->p_paddr + phdr->p_memsz > max_addr)
  982. max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
  983. }
  984. /*
  985. * In case of a modem subsystem restart on secure devices, the modem
  986. * memory can be reclaimed only after MBA is loaded.
  987. */
  988. q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
  989. qproc->mpss_phys, qproc->mpss_size);
  990. /* Share ownership between Linux and MSS, during segment loading */
  991. ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
  992. qproc->mpss_phys, qproc->mpss_size);
  993. if (ret) {
  994. dev_err(qproc->dev,
  995. "assigning Q6 access to mpss memory failed: %d\n", ret);
  996. ret = -EAGAIN;
  997. goto release_firmware;
  998. }
  999. mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
  1000. qproc->mpss_reloc = mpss_reloc;
  1001. /* Load firmware segments */
  1002. for (i = 0; i < ehdr->e_phnum; i++) {
  1003. phdr = &phdrs[i];
  1004. if (!q6v5_phdr_valid(phdr))
  1005. continue;
  1006. offset = phdr->p_paddr - mpss_reloc;
  1007. if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
  1008. dev_err(qproc->dev, "segment outside memory range\n");
  1009. ret = -EINVAL;
  1010. goto release_firmware;
  1011. }
  1012. if (phdr->p_filesz > phdr->p_memsz) {
  1013. dev_err(qproc->dev,
  1014. "refusing to load segment %d with p_filesz > p_memsz\n",
  1015. i);
  1016. ret = -EINVAL;
  1017. goto release_firmware;
  1018. }
  1019. ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
  1020. if (!ptr) {
  1021. dev_err(qproc->dev,
  1022. "unable to map memory region: %pa+%zx-%x\n",
  1023. &qproc->mpss_phys, offset, phdr->p_memsz);
  1024. goto release_firmware;
  1025. }
  1026. if (phdr->p_filesz && phdr->p_offset < fw->size) {
  1027. /* Firmware is large enough to be non-split */
  1028. if (phdr->p_offset + phdr->p_filesz > fw->size) {
  1029. dev_err(qproc->dev,
  1030. "failed to load segment %d from truncated file %s\n",
  1031. i, fw_name);
  1032. ret = -EINVAL;
  1033. memunmap(ptr);
  1034. goto release_firmware;
  1035. }
  1036. memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
  1037. } else if (phdr->p_filesz) {
  1038. /* Replace "xxx.xxx" with "xxx.bxx" */
  1039. sprintf(fw_name + fw_name_len - 3, "b%02d", i);
  1040. ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
  1041. ptr, phdr->p_filesz);
  1042. if (ret) {
  1043. dev_err(qproc->dev, "failed to load %s\n", fw_name);
  1044. memunmap(ptr);
  1045. goto release_firmware;
  1046. }
  1047. if (seg_fw->size != phdr->p_filesz) {
  1048. dev_err(qproc->dev,
  1049. "failed to load segment %d from truncated file %s\n",
  1050. i, fw_name);
  1051. ret = -EINVAL;
  1052. release_firmware(seg_fw);
  1053. memunmap(ptr);
  1054. goto release_firmware;
  1055. }
  1056. release_firmware(seg_fw);
  1057. }
  1058. if (phdr->p_memsz > phdr->p_filesz) {
  1059. memset(ptr + phdr->p_filesz, 0,
  1060. phdr->p_memsz - phdr->p_filesz);
  1061. }
  1062. memunmap(ptr);
  1063. size += phdr->p_memsz;
  1064. code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
  1065. if (!code_length) {
  1066. boot_addr = relocate ? qproc->mpss_phys : min_addr;
  1067. writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
  1068. writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
  1069. }
  1070. writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
  1071. ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
  1072. if (ret < 0) {
  1073. dev_err(qproc->dev, "MPSS authentication failed: %d\n",
  1074. ret);
  1075. goto release_firmware;
  1076. }
  1077. }
  1078. /* Transfer ownership of modem ddr region to q6 */
  1079. ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
  1080. qproc->mpss_phys, qproc->mpss_size);
  1081. if (ret) {
  1082. dev_err(qproc->dev,
  1083. "assigning Q6 access to mpss memory failed: %d\n", ret);
  1084. ret = -EAGAIN;
  1085. goto release_firmware;
  1086. }
  1087. ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
  1088. if (ret == -ETIMEDOUT)
  1089. dev_err(qproc->dev, "MPSS authentication timed out\n");
  1090. else if (ret < 0)
  1091. dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
  1092. qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
  1093. release_firmware:
  1094. release_firmware(fw);
  1095. out:
  1096. kfree(fw_name);
  1097. return ret < 0 ? ret : 0;
  1098. }
/*
 * qcom_q6v5_dump_segment() - coredump callback copying one MPSS segment
 * @rproc: remoteproc instance
 * @segment: segment descriptor registered in qcom_q6v5_register_dump_segments()
 * @dest: destination buffer provided by the coredump core
 * @cp_offset: offset within the segment to start copying from
 * @size: number of bytes to copy
 *
 * If the MBA is not loaded it is rebooted first so that memory ownership can
 * be transferred back to Linux.  Unreadable regions are filled with 0xff.
 * After the final segment the MPSS region is handed back to the Q6 and the
 * MBA is reclaimed.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest, size_t cp_offset, size_t size)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);

	if (ptr) {
		memcpy(dest, ptr, size);
		memunmap(ptr);
	} else {
		/* Mark unreadable memory so the dump is still well-formed */
		memset(dest, 0xff, size);
	}

	qproc->current_dump_size += size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}
/*
 * q6v5_start() - rproc_ops .start: boot the MBA then load and start the MPSS
 * @rproc: remoteproc instance
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
		 qproc->dp_size ? "" : "out");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	/* Wait for the modem to signal it is up */
	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Modem is running; take the MBA buffer back from the Q6 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->current_dump_size = 0;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);
	q6v5_dump_mba_logs(qproc);

	return ret;
}
  1171. static int q6v5_stop(struct rproc *rproc)
  1172. {
  1173. struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
  1174. int ret;
  1175. ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
  1176. if (ret == -ETIMEDOUT)
  1177. dev_err(qproc->dev, "timed out on wait\n");
  1178. q6v5_mba_reclaim(qproc);
  1179. return 0;
  1180. }
/*
 * qcom_q6v5_register_dump_segments() - rproc_ops .parse_fw hook
 * @rproc: remoteproc instance
 * @mba_fw: the MBA firmware (unused; segments come from the mdt image)
 *
 * Parses the hexagon mdt image and registers one custom coredump segment,
 * backed by qcom_q6v5_dump_segment(), per loadable program header.  Also
 * accumulates the total dump size used to detect the final segment copy.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->total_dump_size = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];
		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							NULL);
		if (ret)
			break;

		qproc->total_dump_size += phdr->p_memsz;
	}

	release_firmware(fw);
	return ret;
}
  1216. static unsigned long q6v5_panic(struct rproc *rproc)
  1217. {
  1218. struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
  1219. return qcom_q6v5_panic(&qproc->q6v5);
  1220. }
/* remoteproc operations for the modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
	.panic = q6v5_panic,
};
/*
 * qcom_msa_handover() - release proxy resources once the modem takes over
 * @q6v5: common q6v5 state embedded in struct q6v5
 *
 * Called when the modem signals the MSA handover; the proxy clocks,
 * regulators and power domains held for the boot phase are no longer needed.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
/*
 * q6v5_init_mem() - map MMIO regions and look up syscon halt/spare registers
 * @qproc: driver instance
 * @pdev: platform device
 *
 * Maps the "qdsp6" and "rmb" register regions, resolves the three AXI halt
 * register offsets from the "qcom,halt-regs" phandle, and - on platforms with
 * a spare register - the "qcom,spare-regs" syscon as well.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	/* Offsets of the q6, modem and nc halt register banks */
	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_spare_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,spare-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}
  1279. static int q6v5_init_clocks(struct device *dev, struct clk **clks,
  1280. char **clk_names)
  1281. {
  1282. int i;
  1283. if (!clk_names)
  1284. return 0;
  1285. for (i = 0; clk_names[i]; i++) {
  1286. clks[i] = devm_clk_get(dev, clk_names[i]);
  1287. if (IS_ERR(clks[i])) {
  1288. int rc = PTR_ERR(clks[i]);
  1289. if (rc != -EPROBE_DEFER)
  1290. dev_err(dev, "Failed to get %s clock\n",
  1291. clk_names[i]);
  1292. return rc;
  1293. }
  1294. }
  1295. return i;
  1296. }
/*
 * q6v5_pds_attach() - attach to the named power domains
 * @dev: consumer device
 * @devs: output array of power-domain virtual devices
 * @pd_names: NULL-terminated list of domain names (may be NULL)
 *
 * Return: number of attached domains (0 when @pd_names is NULL), or a
 * negative errno; on failure any already-attached domains are detached.
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			/* NULL (no PD) is mapped to -ENODATA */
			ret = PTR_ERR(devs[i]) ? : -ENODATA;
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	/* Detach in reverse the domains attached so far */
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(devs[i], false);

	return ret;
}
  1320. static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
  1321. size_t pd_count)
  1322. {
  1323. int i;
  1324. for (i = 0; i < pd_count; i++)
  1325. dev_pm_domain_detach(pds[i], false);
  1326. }
  1327. static int q6v5_init_reset(struct q6v5 *qproc)
  1328. {
  1329. qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
  1330. "mss_restart");
  1331. if (IS_ERR(qproc->mss_restart)) {
  1332. dev_err(qproc->dev, "failed to acquire mss restart\n");
  1333. return PTR_ERR(qproc->mss_restart);
  1334. }
  1335. if (qproc->has_alt_reset || qproc->has_spare_reg) {
  1336. qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
  1337. "pdc_reset");
  1338. if (IS_ERR(qproc->pdc_reset)) {
  1339. dev_err(qproc->dev, "failed to acquire pdc reset\n");
  1340. return PTR_ERR(qproc->pdc_reset);
  1341. }
  1342. }
  1343. return 0;
  1344. }
  1345. static int q6v5_alloc_memory_region(struct q6v5 *qproc)
  1346. {
  1347. struct device_node *child;
  1348. struct reserved_mem *rmem;
  1349. struct device_node *node;
  1350. struct resource r;
  1351. int ret;
  1352. /*
  1353. * In the absence of mba/mpss sub-child, extract the mba and mpss
  1354. * reserved memory regions from device's memory-region property.
  1355. */
  1356. child = of_get_child_by_name(qproc->dev->of_node, "mba");
  1357. if (!child) {
  1358. node = of_parse_phandle(qproc->dev->of_node,
  1359. "memory-region", 0);
  1360. } else {
  1361. node = of_parse_phandle(child, "memory-region", 0);
  1362. of_node_put(child);
  1363. }
  1364. ret = of_address_to_resource(node, 0, &r);
  1365. of_node_put(node);
  1366. if (ret) {
  1367. dev_err(qproc->dev, "unable to resolve mba region\n");
  1368. return ret;
  1369. }
  1370. qproc->mba_phys = r.start;
  1371. qproc->mba_size = resource_size(&r);
  1372. qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
  1373. if (!qproc->mba_region) {
  1374. dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
  1375. &r.start, qproc->mba_size);
  1376. return -EBUSY;
  1377. }
  1378. if (!child) {
  1379. node = of_parse_phandle(qproc->dev->of_node,
  1380. "memory-region", 1);
  1381. } else {
  1382. child = of_get_child_by_name(qproc->dev->of_node, "mpss");
  1383. node = of_parse_phandle(child, "memory-region", 0);
  1384. of_node_put(child);
  1385. }
  1386. ret = of_address_to_resource(node, 0, &r);
  1387. of_node_put(node);
  1388. if (ret) {
  1389. dev_err(qproc->dev, "unable to resolve mpss region\n");
  1390. return ret;
  1391. }
  1392. qproc->mpss_phys = qproc->mpss_reloc = r.start;
  1393. qproc->mpss_size = resource_size(&r);
  1394. if (!child) {
  1395. node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
  1396. } else {
  1397. child = of_get_child_by_name(qproc->dev->of_node, "metadata");
  1398. node = of_parse_phandle(child, "memory-region", 0);
  1399. of_node_put(child);
  1400. }
  1401. if (!node)
  1402. return 0;
  1403. rmem = of_reserved_mem_lookup(node);
  1404. if (!rmem) {
  1405. dev_err(qproc->dev, "unable to resolve metadata region\n");
  1406. return -EINVAL;
  1407. }
  1408. qproc->mdata_phys = rmem->base;
  1409. qproc->mdata_size = rmem->size;
  1410. return 0;
  1411. }
/*
 * q6v5_probe() - platform driver probe
 * @pdev: platform device
 *
 * Allocates the rproc, resolves firmware names, maps registers and memory
 * regions, acquires clocks/regulators/power-domains/resets per the SoC match
 * data, registers the glink/smd/ssr/sysmon subdevices and finally adds the
 * rproc.  Errors unwind in reverse via the goto ladder.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	const char *mba_image;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	/* Secure world is mandatory when memory protection is required */
	if (desc->need_mem_protection && !qcom_scm_is_available())
		return -EPROBE_DEFER;

	/* "firmware-name"[0] may override the default MBA image name */
	mba_image = desc->hexagon_mba_image;
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    0, &mba_image);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->auto_boot = false;
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	/* "firmware-name"[1] may override the default mdt image name */
	qproc->hexagon_mdt_image = "modem.mdt";
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    1, &qproc->hexagon_mdt_image);
	if (ret < 0 && ret != -EINVAL)
		goto free_rproc;

	platform_set_drvdata(pdev, qproc);

	qproc->has_spare_reg = desc->has_spare_reg;
	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
			      desc->active_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach active power domains\n");
		goto free_rproc;
	}
	qproc->active_pd_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto detach_active_pds;
	}
	qproc->proxy_pd_count = ret;

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;
	qproc->has_mba_logs = desc->has_mba_logs;

	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	/* Both regions start out owned by Linux (HLOS) */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		ret = PTR_ERR(qproc->sysmon);
		goto remove_subdevs;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_sysmon_subdev;

	return 0;

remove_sysmon_subdev:
	qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}
/*
 * q6v5_remove() - platform driver remove; undo q6v5_probe() in reverse order
 * @pdev: platform device
 *
 * Return: always 0.
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	/* Unregister first so no new operations can start */
	rproc_del(rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);

	rproc_free(rproc);

	return 0;
}
/* SoC resource description: SC7180 modem subsystem */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = true,
	.has_spare_reg = true,
	.version = MSS_SC7180,
};
/* SoC resource description: SDM845 modem subsystem */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_SDM845,
};
/*
 * Per-SoC resource description for the MSM8998 modem subsystem,
 * selected via the "qcom,msm8998-mss-pil" compatible in q6v5_of_match.
 * No reset_clk_names / active_pd_names: this SoC needs neither.
 */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8998,
};
/*
 * Per-SoC resource description for the MSM8996 modem subsystem,
 * selected via the "qcom,msm8996-mss-pil" compatible in q6v5_of_match.
 * Unlike the newer SoCs above, the "pll" rail is a proxy-voted
 * regulator supply rather than a power domain.
 */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	/* Empty {} entry terminates the supply list. */
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"mx",
		"cx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8996,
};
/*
 * Per-SoC resource description for the MSM8916 modem subsystem,
 * selected via the "qcom,msm8916-mss-pil" (and legacy "qcom,q6v5-pil")
 * compatibles in q6v5_of_match.  The mx/cx/pll rails are managed as
 * proxy-voted regulator supplies; no power domains are attached.
 */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	/* Empty {} entry terminates the supply list. */
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,	/* fixed voltage vote */
		},
		{
			.supply = "cx",
			.uA = 100000,	/* load (current) vote only */
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,	/* no TrustZone memory assignment on this SoC */
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8916,
};
/*
 * Per-SoC resource description for the MSM8974 modem subsystem,
 * selected via the "qcom,msm8974-mss-pil" compatible in q6v5_of_match.
 * This is the only variant with an active_supply ("mss" rail held for
 * the whole time the Hexagon runs) and the only one using the split
 * "mba.b00" MBA firmware name.
 */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	/* Empty {} entry terminates each supply list. */
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,	/* fixed voltage vote */
		},
		{
			.supply = "cx",
			.uA = 100000,	/* load (current) vote only */
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8974,
};
/*
 * Device-tree match table.  .data selects the per-SoC resource set
 * consumed by q6v5_probe().  The list is terminated by the empty entry.
 */
static const struct of_device_id q6v5_of_match[] = {
	/* "qcom,q6v5-pil" is served by the same resources as msm8916. */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
/* Platform-driver glue; registration handled by module_platform_driver(). */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");