rpmh-rsc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <soc/qcom/crm.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#include <linux/ipc_logging.h>

#define RSC_DRV_IPC_LOG_SIZE 2

/* DRV ID Register */
#define RSC_DRV_ID 0

#define MAJOR_VER_MASK 0xFF
#define MAJOR_VER_SHIFT 16
#define MINOR_VER_MASK 0xFF
#define MINOR_VER_SHIFT 8

/* DRV HW Solver Configuration Register Mask */
#define DRV_HW_SOLVER_MASK 1
#define DRV_HW_SOLVER_SHIFT 24

/* DRV TCS Configuration Information Register Mask */
#define DRV_NUM_TCS_MASK 0x3F
#define DRV_NUM_TCS_SHIFT 6
#define DRV_NCPT_MASK 0x1F
#define DRV_NCPT_SHIFT 27

#define TCS_AMC_MODE_ENABLE BIT(16)
#define TCS_AMC_MODE_TRIGGER BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN 8
#define CMD_MSGID_RESP_REQ BIT(8)
#define CMD_MSGID_WRITE BIT(16)
#define CMD_STATUS_ISSUED BIT(8)
#define CMD_STATUS_COMPL BIT(16)

/* Offsets for DRV channel status register */
#define CH0_CHN_BUSY BIT(0)
#define CH1_CHN_BUSY BIT(1)
#define CH0_WAKE_TCS_STATUS BIT(0)
#define CH0_SLEEP_TCS_STATUS BIT(1)
#define CH1_WAKE_TCS_STATUS BIT(2)
#define CH1_SLEEP_TCS_STATUS BIT(3)
#define CH_CLEAR_STATUS BIT(31)

#define ACCL_TYPE(addr) (((addr) >> 16) & 0xF)
#define VREG_ADDR(addr) ((addr) & ~0xF)

#define MAX_RSC_COUNT 5

enum {
        HW_ACCL_CLK = 0x3,
        HW_ACCL_VREG,
        HW_ACCL_BUS,
};

static const char * const accl_str[] = {
        "", "", "", "CLK", "VREG", "BUS",
};

static LIST_HEAD(rpmh_rsc_dev_list);
static struct rsc_drv *__rsc_drv[MAX_RSC_COUNT];
static int __rsc_count;
bool rpmh_standalone;

/*
 * Here's a high level overview of how all the registers in RPMH work
 * together:
 *
 * - The main rpmh-rsc address is the base of a register space that can
 *   be used to find overall configuration of the hardware
 *   (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
 *   space are all the TCS blocks. The offset of the TCS blocks is
 *   specified in the device tree by "qcom,tcs-offset" and used to
 *   compute tcs_base.
 * - TCS blocks come one after another. Type, count, and order are
 *   specified by the device tree as "qcom,tcs-config".
 * - Each TCS block has some registers, then space for up to 16 commands.
 *   Note that though address space is reserved for 16 commands, fewer
 *   might be present. See ncpt (num cmds per TCS).
 *
 * Here's a picture:
 *
 * +--------------------------------------------------+
 * |RSC                                               |
 * | ctrl                                             |
 * |                                                  |
 * | Drvs:                                            |
 * | +----------------------------------------------+ |
 * | |DRV0                                          | |
 * | | ctrl/config                                  | |
 * | | IRQ                                          | |
 * | |                                              | |
 * | | TCSes:                                       | |
 * | | +------------------------------------------+ | |
 * | | |TCS0  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 * | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | +------------------------------------------+ | |
 * | | +------------------------------------------+ | |
 * | | |TCS1  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 * | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | +------------------------------------------+ | |
 * | | +------------------------------------------+ | |
 * | | |TCS2  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 * | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | +------------------------------------------+ | |
 * | |                   ......                     | |
 * | +----------------------------------------------+ |
 * | +----------------------------------------------+ |
 * | |DRV1                                          | |
 * | | (same as DRV0)                               | |
 * | +----------------------------------------------+ |
 * |                     ......                       |
 * +--------------------------------------------------+
 */
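
/*
 * Worked example of the layout above (using the ver 3.0 offset table
 * defined below): the CMD_ADDR register of command 3 in TCS 2 sits at
 * tcs_base + 2 * 672 + 0x38 + 3 * 24, i.e. the TCS stride times tcs_id,
 * plus the register offset, plus the command stride times cmd_id, which
 * is exactly what tcs_cmd_addr() computes.
 */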
enum {
        RSC_DRV_TCS_OFFSET,
        RSC_DRV_CMD_OFFSET,
        /* DRV HW Solver Configuration Information Register */
        DRV_SOLVER_CONFIG,
        /* DRV TCS Configuration Information Register */
        DRV_PRNT_CHLD_CONFIG,
        /* Offsets for common TCS Registers, one bit per TCS */
        RSC_DRV_IRQ_ENABLE,
        RSC_DRV_IRQ_STATUS,
        RSC_DRV_IRQ_CLEAR, /* w/o; write 1 to clear */
        /*
         * Offsets for per TCS Registers.
         *
         * TCSes start at 0x10 from tcs_base and are stored one after another.
         * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
         * of the below to find a register.
         */
        RSC_DRV_CMD_WAIT_FOR_CMPL, /* 1 bit per command */
        RSC_DRV_CONTROL,
        RSC_DRV_STATUS, /* zero if tcs is busy */
        RSC_DRV_CMD_ENABLE, /* 1 bit per command */
        /*
         * Offsets for per command in a TCS.
         *
         * Commands (up to 16) start at 0x30 in a TCS; multiply command index
         * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
         */
        RSC_DRV_CMD_MSGID,
        RSC_DRV_CMD_ADDR,
        RSC_DRV_CMD_DATA,
        RSC_DRV_CMD_STATUS,
        RSC_DRV_CMD_RESP_DATA,
        /* DRV channel Registers */
        RSC_DRV_CHN_SEQ_BUSY,
        RSC_DRV_CHN_SEQ_PC,
        RSC_DRV_CHN_TCS_TRIGGER,
        RSC_DRV_CHN_TCS_COMPLETE,
        RSC_DRV_CHN_UPDATE,
        RSC_DRV_CHN_BUSY,
        RSC_DRV_CHN_EN,
};

static u32 rpmh_rsc_reg_offsets_ver_2_7[] = {
        [RSC_DRV_TCS_OFFSET] = 672,
        [RSC_DRV_CMD_OFFSET] = 20,
        [DRV_SOLVER_CONFIG] = 0x04,
        [DRV_PRNT_CHLD_CONFIG] = 0x0C,
        [RSC_DRV_IRQ_ENABLE] = 0x00,
        [RSC_DRV_IRQ_STATUS] = 0x04,
        [RSC_DRV_IRQ_CLEAR] = 0x08,
        [RSC_DRV_CMD_WAIT_FOR_CMPL] = 0x10,
        [RSC_DRV_CONTROL] = 0x14,
        [RSC_DRV_STATUS] = 0x18,
        [RSC_DRV_CMD_ENABLE] = 0x1C,
        [RSC_DRV_CMD_MSGID] = 0x30,
        [RSC_DRV_CMD_ADDR] = 0x34,
        [RSC_DRV_CMD_DATA] = 0x38,
        [RSC_DRV_CMD_STATUS] = 0x3C,
        [RSC_DRV_CMD_RESP_DATA] = 0x40,
        [RSC_DRV_CHN_SEQ_BUSY] = 0x0,
        [RSC_DRV_CHN_SEQ_PC] = 0x0,
        [RSC_DRV_CHN_TCS_TRIGGER] = 0x0,
        [RSC_DRV_CHN_TCS_COMPLETE] = 0x0,
        [RSC_DRV_CHN_UPDATE] = 0x0,
        [RSC_DRV_CHN_BUSY] = 0x0,
        [RSC_DRV_CHN_EN] = 0x0,
};

static u32 rpmh_rsc_reg_offsets_ver_3_0[] = {
        [RSC_DRV_TCS_OFFSET] = 672,
        [RSC_DRV_CMD_OFFSET] = 24,
        [DRV_SOLVER_CONFIG] = 0x04,
        [DRV_PRNT_CHLD_CONFIG] = 0x0C,
        [RSC_DRV_IRQ_ENABLE] = 0x00,
        [RSC_DRV_IRQ_STATUS] = 0x04,
        [RSC_DRV_IRQ_CLEAR] = 0x08,
        [RSC_DRV_CMD_WAIT_FOR_CMPL] = 0x20,
        [RSC_DRV_CONTROL] = 0x24,
        [RSC_DRV_STATUS] = 0x28,
        [RSC_DRV_CMD_ENABLE] = 0x2C,
        [RSC_DRV_CMD_MSGID] = 0x34,
        [RSC_DRV_CMD_ADDR] = 0x38,
        [RSC_DRV_CMD_DATA] = 0x3C,
        [RSC_DRV_CMD_STATUS] = 0x40,
        [RSC_DRV_CMD_RESP_DATA] = 0x44,
        [RSC_DRV_CHN_SEQ_BUSY] = 0x464,
        [RSC_DRV_CHN_SEQ_PC] = 0x468,
        [RSC_DRV_CHN_TCS_TRIGGER] = 0x490,
        [RSC_DRV_CHN_TCS_COMPLETE] = 0x494,
        [RSC_DRV_CHN_UPDATE] = 0x498,
        [RSC_DRV_CHN_BUSY] = 0x49C,
        [RSC_DRV_CHN_EN] = 0x4A0,
};

static u32 rpmh_rsc_reg_offsets_ver_3_0_hw_channel[] = {
        [RSC_DRV_TCS_OFFSET] = 336,
        [RSC_DRV_CMD_OFFSET] = 24,
        [DRV_SOLVER_CONFIG] = 0x04,
        [DRV_PRNT_CHLD_CONFIG] = 0x0C,
        [RSC_DRV_IRQ_ENABLE] = 0x00,
        [RSC_DRV_IRQ_STATUS] = 0x04,
        [RSC_DRV_IRQ_CLEAR] = 0x08,
        [RSC_DRV_CMD_WAIT_FOR_CMPL] = 0x20,
        [RSC_DRV_CONTROL] = 0x24,
        [RSC_DRV_STATUS] = 0x28,
        [RSC_DRV_CMD_ENABLE] = 0x2C,
        [RSC_DRV_CMD_MSGID] = 0x34,
        [RSC_DRV_CMD_ADDR] = 0x38,
        [RSC_DRV_CMD_DATA] = 0x3C,
        [RSC_DRV_CMD_STATUS] = 0x40,
        [RSC_DRV_CMD_RESP_DATA] = 0x44,
        [RSC_DRV_CHN_SEQ_BUSY] = 0x464,
        [RSC_DRV_CHN_SEQ_PC] = 0x468,
        [RSC_DRV_CHN_TCS_TRIGGER] = 0x490,
        [RSC_DRV_CHN_TCS_COMPLETE] = 0x494,
        [RSC_DRV_CHN_UPDATE] = 0x498,
        [RSC_DRV_CHN_BUSY] = 0x49C,
        [RSC_DRV_CHN_EN] = 0x4A0,
};
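
/*
 * As the tables above show, relative to ver 2.7 the ver 3.0 layout widens
 * each command slot from 20 to 24 bytes and moves the per-TCS registers up
 * by 0x10; the hw_channel variant additionally shrinks the TCS stride to
 * 336 bytes. The channel registers only exist on ver 3.0, which is why
 * they are left 0 in the 2.7 table.
 */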
static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
        return drv->tcs_base + drv->regs[RSC_DRV_TCS_OFFSET] * tcs_id + reg;
}

static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
        return tcs_reg_addr(drv, reg, tcs_id) + drv->regs[RSC_DRV_CMD_OFFSET] * cmd_id;
}

static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
                        int cmd_id)
{
        return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
        return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
                          int cmd_id, u32 data)
{
        writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
                          u32 data)
{
        writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
                               u32 data)
{
        int i;

        writel(data, tcs_reg_addr(drv, reg, tcs_id));

        /*
         * Wait until we read back the same value. Use a counter rather than
         * ktime for timeout since this may be called after timekeeping stops.
         */
        for (i = 0; i < USEC_PER_SEC; i++) {
                if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
                        return;
                udelay(1);
        }
        pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
               data, tcs_id, reg);
}
/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv: The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 * @ch: Channel number
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type, int ch)
{
        int m;
        struct tcs_group *tcs = &drv->ch[ch].tcs[type];

        /* Caller ensures nobody else is running so no lock */
        if (bitmap_empty(tcs->slots, tcs->ncpt * tcs->num_tcs))
                return;

        for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
                write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], m, 0);
                write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_WAIT_FOR_CMPL], m, 0);
        }

        bitmap_zero(tcs->slots, tcs->ncpt * tcs->num_tcs);
}
/**
 * rpmh_rsc_get_channel() - Get an unused channel to send data on.
 * @drv: The RSC controller.
 *
 * Return: The unused channel number (CH0 or CH1) on success, else -EBUSY.
 */
int rpmh_rsc_get_channel(struct rsc_drv *drv)
{
        int chn_update, chn_busy;

        if (drv->num_channels == 1)
                return CH0;

        /* Select the unused channel */
        do {
                chn_update = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_UPDATE]);
                chn_busy = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_BUSY]);
        } while (chn_busy != chn_update);

        if (chn_busy & CH0_CHN_BUSY)
                return CH1;
        else if (chn_busy & CH1_CHN_BUSY)
                return CH0;
        else
                return -EBUSY;
}
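
/*
 * For example: chn_busy == 0x1 (CH0_CHN_BUSY) means channel 0 holds the
 * current votes, so CH1 is returned as the unused channel; 0x2 yields CH0;
 * 0x0 (nothing busy) is unexpected here and yields -EBUSY.
 */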
/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 * @ch: Channel number
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv, int ch)
{
        tcs_invalidate(drv, SLEEP_TCS, ch);
        tcs_invalidate(drv, WAKE_TCS, ch);
}
/**
 * get_tcs_for_msg() - Get the tcs_group used to send a message in @state.
 * @drv: The RSC controller.
 * @state: The rpmh_state of the message we want to send.
 * @ch: Channel number.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
                                         enum rpmh_state state,
                                         int ch)
{
        int type;
        struct tcs_group *tcs;

        switch (state) {
        case RPMH_ACTIVE_ONLY_STATE:
                type = ACTIVE_TCS;
                break;
        case RPMH_WAKE_ONLY_STATE:
                type = WAKE_TCS;
                break;
        case RPMH_SLEEP_STATE:
                type = SLEEP_TCS;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        /*
         * If we are making an active request on a RSC that does not have a
         * dedicated TCS for active state use, then re-purpose a wake TCS to
         * send active votes. This is safe because we ensure any active-only
         * transfers have finished before we use it (maybe by running from
         * the last CPU in PM code).
         */
        tcs = &drv->ch[ch].tcs[type];
        if (state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
                tcs = &drv->ch[ch].tcs[WAKE_TCS];

        return tcs;
}
/**
 * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
 * @drv: The RSC controller.
 * @tcs_id: The global ID of this TCS.
 * @ch: Filled in with the channel the TCS belongs to.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request, or NULL if the TCS is not in any channel's
 *         active group.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
                                                  int tcs_id,
                                                  int *ch)
{
        struct tcs_group *tcs;
        int i;

        for (i = 0; i < MAX_CHANNEL; i++) {
                if (!drv->ch[i].initialized)
                        continue;

                tcs = get_tcs_for_msg(drv, RPMH_ACTIVE_ONLY_STATE, i);
                if (tcs->mask & BIT(tcs_id)) {
                        *ch = i;
                        return tcs->req[tcs_id - tcs->offset];
                }
        }

        return NULL;
}
/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv: The controller.
 * @tcs_id: The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
        u32 enable;

        /*
         * HW req: Clear the DRV_CONTROL and enable TCS again
         * While clearing ensure that the AMC mode trigger is cleared
         * and then the mode enable is cleared.
         */
        enable = read_tcs_reg(drv, drv->regs[RSC_DRV_CONTROL], tcs_id);
        enable &= ~TCS_AMC_MODE_TRIGGER;
        write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CONTROL], tcs_id, enable);
        enable &= ~TCS_AMC_MODE_ENABLE;
        write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CONTROL], tcs_id, enable);

        if (trigger) {
                /* Enable the AMC mode on the TCS and then trigger the TCS */
                enable = TCS_AMC_MODE_ENABLE;
                write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CONTROL], tcs_id, enable);
                enable |= TCS_AMC_MODE_TRIGGER;
                write_tcs_reg(drv, drv->regs[RSC_DRV_CONTROL], tcs_id, enable);
        }
}
/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv: The controller.
 * @tcs_id: The global ID of this TCS.
 * @enable: If true then enable; if false then disable
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
        u32 data;

        data = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);
        if (enable)
                data |= BIT(tcs_id);
        else
                data &= ~BIT(tcs_id);
        writel_relaxed(data, drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);
}
/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p: Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
        struct rsc_drv *drv = p;
        int i, ch;
        unsigned long irq_status;
        const struct tcs_request *req;

        irq_status = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]);

        for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
                req = get_req_from_tcs(drv, i, &ch);
                if (WARN_ON(!req))
                        goto skip;

                trace_rpmh_tx_done(drv, i, req);
                ipc_log_string(drv->ipc_log_ctx, "IRQ response: m=%d", i);

                /*
                 * If wake tcs was re-purposed for sending active
                 * votes, clear AMC trigger & enable modes and
                 * disable interrupt for this TCS
                 */
                if (!drv->ch[ch].tcs[ACTIVE_TCS].num_tcs)
                        __tcs_set_trigger(drv, i, false);
skip:
                /* Reclaim the TCS */
                write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
                write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_WAIT_FOR_CMPL], i, 0);
                writel_relaxed(BIT(i), drv->tcs_base + drv->regs[RSC_DRV_IRQ_CLEAR]);

                spin_lock(&drv->lock);
                clear_bit(i, drv->tcs_in_use);
                /*
                 * Disable interrupt for WAKE TCS to avoid being
                 * spammed with interrupts coming when the solver
                 * sends its wake votes.
                 */
                if (!drv->ch[ch].tcs[ACTIVE_TCS].num_tcs)
                        enable_tcs_irq(drv, i, false);
                spin_unlock(&drv->lock);

                wake_up(&drv->tcs_wait);

                if (req)
                        rpmh_tx_done(req);
        }

        return IRQ_HANDLED;
}
/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv: The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This is used for all types of transfers (active, sleep, and wake).
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
                               const struct tcs_request *msg)
{
        u32 msgid;
        u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
        u32 cmd_enable = 0;
        u32 cmd_complete;
        struct tcs_cmd *cmd;
        int i, j;

        cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
        cmd_complete = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_WAIT_FOR_CMPL], tcs_id);

        for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
                cmd = &msg->cmds[i];
                cmd_enable |= BIT(j);
                cmd_complete |= cmd->wait << j;
                msgid = cmd_msgid;
                msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

                write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, j, msgid);
                write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, j, cmd->addr);
                write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, j, cmd->data);
                trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
                ipc_log_string(drv->ipc_log_ctx,
                               "TCS write: m=%d n=%d msgid=%#x addr=%#x data=%#x wait=%d",
                               tcs_id, j, msgid, cmd->addr,
                               cmd->data, cmd->wait);
        }

        write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_WAIT_FOR_CMPL], tcs_id, cmd_complete);
        cmd_enable |= read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id);
        write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, cmd_enable);
}
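
/*
 * For reference: with the masks defined at the top of this file, a plain
 * write command is encoded as msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE =
 * 0x10008, and a command with cmd->wait (or msg->wait_for_compl) set
 * additionally ORs in CMD_MSGID_RESP_REQ, giving 0x10108.
 */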
/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This will walk through the TCSes in the group and check if any of them
 * appear to be sending to addresses referenced in the message. If it finds
 * one it'll return -EBUSY.
 *
 * Only for use for active-only transfers.
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 *         The caller must re-enable interrupts between tries since that's
 *         the only way tcs_in_use will ever be updated and the only way
 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
                                  const struct tcs_request *msg)
{
        unsigned long curr_enabled;
        u32 addr;
        int j, k;
        int i = tcs->offset;
        unsigned long accl;

        for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
                curr_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i);

                for_each_set_bit(j, &curr_enabled, tcs->ncpt) {
                        addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
                        for (k = 0; k < msg->num_cmds; k++) {
                                /*
                                 * Each RPMh VREG accelerator resource has 3 or 4 contiguous
                                 * 4-byte aligned addresses associated with it. Ignore the
                                 * offset to check for in-flight VREG requests.
                                 */
                                accl = ACCL_TYPE(msg->cmds[k].addr);
                                if (accl == HW_ACCL_VREG &&
                                    VREG_ADDR(addr) == VREG_ADDR(msg->cmds[k].addr))
                                        return -EBUSY;
                                else if (addr == msg->cmds[k].addr)
                                        return -EBUSY;
                        }
                }
        }

        return 0;
}
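
/*
 * Example of the VREG aliasing check above (addresses are illustrative):
 * a pending command at 0x40008 and a new command at 0x40004 differ only in
 * the low 4 bits; since ACCL_TYPE() of the new address is HW_ACCL_VREG
 * (0x4), VREG_ADDR() maps both to 0x40000 and the request is rejected with
 * -EBUSY even though the raw addresses differ.
 */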
/**
 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 *       we borrowed it because there are zero active-only ones).
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: The first tcs that's free or -EBUSY if all in use.
 */
static int find_free_tcs(struct tcs_group *tcs)
{
        const struct rsc_drv *drv = tcs->drv;
        unsigned long i;
        unsigned long max = tcs->offset + tcs->num_tcs;
        int timeout = 100;

        i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
        if (i >= max)
                return -EBUSY;

        while (timeout) {
                if (read_tcs_reg(drv, drv->regs[RSC_DRV_STATUS], i))
                        break;
                timeout--;
                udelay(1);
        }

        if (!timeout)
                return -EBUSY;

        return i;
}
/**
 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 * is in flight that would conflict with the one in @msg.
 *
 * Context: Must be called with the drv->lock held since that protects
 * tcs_in_use.
 *
 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 * or the tcs_group is full.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
                             const struct tcs_request *msg)
{
        int ret;

        /*
         * The h/w does not like if we send a request to the same address,
         * when one is already in-flight or being processed.
         */
        ret = check_for_req_inflight(drv, tcs, msg);
        if (ret)
                return ret;

        return find_free_tcs(tcs);
}
/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 * @ch: Channel number
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupt is set to run on the
 *   active CPU, this can never happen if interrupts are disabled.
 *
 * Return: 0 on success, else -error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg, int ch)
{
        struct tcs_group *tcs;
        int tcs_id;
        unsigned long flags;

        tcs = get_tcs_for_msg(drv, msg->state, ch);
        if (IS_ERR(tcs))
                return PTR_ERR(tcs);

        spin_lock_irqsave(&drv->lock, flags);

        /* Controller is busy in 'solver' mode */
        if (drv->in_solver_mode) {
                spin_unlock_irqrestore(&drv->lock, flags);
                return -EBUSY;
        }

        /* Wait forever for a free tcs. It better be there eventually! */
        wait_event_lock_irq(drv->tcs_wait,
                            (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
                            drv->lock);

        tcs->req[tcs_id - tcs->offset] = msg;
        set_bit(tcs_id, drv->tcs_in_use);

        /*
         * Clear previously programmed ACTIVE/WAKE commands in selected
         * repurposed TCS to avoid triggering them. tcs->slots will be
         * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate().
         */
        write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
        write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_WAIT_FOR_CMPL], tcs_id, 0);

        if (msg->wait_for_compl || (msg->state == RPMH_ACTIVE_ONLY_STATE &&
                                    tcs->type != ACTIVE_TCS))
                enable_tcs_irq(drv, tcs_id, true);
        else
                enable_tcs_irq(drv, tcs_id, false);

        /*
         * These two can be done after the lock is released because:
         * - We marked "tcs_in_use" under lock.
         * - Once "tcs_in_use" has been marked nobody else could be writing
         *   to these registers until the interrupt goes off.
         * - The interrupt can't go off until we trigger w/ the last line
         *   of __tcs_set_trigger() below.
         */
        __tcs_buffer_write(drv, tcs_id, 0, msg);
        __tcs_set_trigger(drv, tcs_id, true);
        ipc_log_string(drv->ipc_log_ctx, "TCS trigger: m=%d wait_for_compl=%u",
                       tcs_id, msg->wait_for_compl);

        if (!msg->wait_for_compl)
                clear_bit(tcs_id, drv->tcs_in_use);

        spin_unlock_irqrestore(&drv->lock, flags);

        if (!msg->wait_for_compl)
                wake_up(&drv->tcs_wait);

        return 0;
}
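
/*
 * Minimal caller sketch (hypothetical addr/data values; real requests are
 * built by the rpmh client layer in rpmh.c using the structs from
 * soc/qcom/tcs.h):
 *
 *	struct tcs_cmd cmd = { .addr = 0x30000, .data = 0x1, .wait = 1 };
 *	struct tcs_request msg = {
 *		.state = RPMH_ACTIVE_ONLY_STATE,
 *		.wait_for_compl = true,
 *		.num_cmds = 1,
 *		.cmds = &cmd,
 *	};
 *
 *	ret = rpmh_rsc_send_data(drv, &msg, ch);
 */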
/**
 * find_slots() - Find a place to write the given message.
 * @tcs: The tcs group to search.
 * @msg: The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
                      int *tcs_id, int *cmd_id)
{
        int slot, offset;
        int i = 0;

        /* Do over, until we can fit the full payload in a single TCS */
        do {
                slot = bitmap_find_next_zero_area(tcs->slots,
                                                  tcs->ncpt * tcs->num_tcs,
                                                  i, msg->num_cmds, 0);
                if (slot >= tcs->num_tcs * tcs->ncpt)
                        return -ENOMEM;
                i += tcs->ncpt;
        } while (slot + msg->num_cmds - 1 >= i);
        bitmap_set(tcs->slots, slot, msg->num_cmds);

        offset = slot / tcs->ncpt;
        *tcs_id = offset + tcs->offset;
        *cmd_id = slot % tcs->ncpt;

        return 0;
}
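
/*
 * Worked example: with num_tcs = 3 and ncpt = 16, a 4-command message that
 * lands at slot = 18 occupies bits 18-21 of tcs->slots, giving
 * *tcs_id = 18 / 16 + tcs->offset and *cmd_id = 18 % 16 = 2. The do/while
 * loop above guarantees all four commands fall within that single TCS.
 */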
/**
 * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
 * @drv: The controller.
 * @msg: The data to be written to the controller.
 * @ch: Channel number
 *
 * This should only be called for sleep/wake state, never active-only
 * state.
 *
 * The caller must ensure that no other RPMH actions are happening and the
 * controller is idle when this function is called since it runs lockless.
 *
 * Return: 0 if no error; else -error.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg, int ch)
{
        struct tcs_group *tcs;
        int tcs_id = 0, cmd_id = 0;
        int ret;

        if (!msg->num_cmds) {
                ipc_log_string(drv->ipc_log_ctx, "Empty num_cmds, returning");
                return 0;
        }

        tcs = get_tcs_for_msg(drv, msg->state, ch);
        if (IS_ERR(tcs))
                return PTR_ERR(tcs);

        /* find the TCS id and the command in the TCS to write to */
        ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
        if (!ret)
                __tcs_buffer_write(drv, tcs_id, cmd_id, msg);

        return ret;
}
static struct tcs_group *get_tcs_from_index(struct rsc_drv *drv, int tcs_id)
{
        unsigned int i, j;

        for (i = 0; i < TCS_TYPE_NR; i++) {
                for (j = 0; j < MAX_CHANNEL; j++) {
                        if (!drv->ch[j].initialized)
                                continue;
                        if (drv->ch[j].tcs[i].mask & BIT(tcs_id))
                                return &drv->ch[j].tcs[i];
                }
        }

        return NULL;
}
static void print_tcs_info(struct rsc_drv *drv, int tcs_id, unsigned long *accl,
                           bool *aoss_irq_sts)
{
        int ch = 0;
        struct tcs_group *tcs_grp = get_tcs_from_index(drv, tcs_id);
        const struct tcs_request *req = get_req_from_tcs(drv, tcs_id, &ch);
        unsigned long cmds_enabled;
        u32 addr, data, msgid, sts, irq_sts;
        bool in_use = test_bit(tcs_id, drv->tcs_in_use);
        int i;

        sts = read_tcs_reg(drv, drv->regs[RSC_DRV_STATUS], tcs_id);
        cmds_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id);
        if (!cmds_enabled || !tcs_grp)
                return;

        if (!req)
                goto print_tcs_data;

        data = read_tcs_reg(drv, drv->regs[RSC_DRV_CONTROL], tcs_id);
        irq_sts = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]);
        pr_warn("Request: tcs-in-use:%s active_tcs=%s(%d) state=%d wait_for_compl=%u\n",
                (in_use ? "YES" : "NO"),
                ((tcs_grp->type == ACTIVE_TCS) ? "YES" : "NO"),
                tcs_grp->type, req->state, req->wait_for_compl);
        pr_warn("TCS=%d [ctrlr-sts:%s amc-mode:0x%x irq-sts:%s]\n",
                tcs_id, sts ? "IDLE" : "BUSY", data,
                (irq_sts & BIT(tcs_id)) ? "COMPLETED" : "PENDING");
        *aoss_irq_sts = (irq_sts & BIT(tcs_id)) ? true : false;

print_tcs_data:
        for_each_set_bit(i, &cmds_enabled, tcs_grp->ncpt) {
                addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, i);
                data = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, i);
                msgid = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, i);
                sts = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_STATUS], tcs_id, i);
                pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x enabled=1]\n",
                        i, addr, data, msgid, sts);
                if (!(sts & CMD_STATUS_ISSUED))
                        continue;
                if (!(sts & CMD_STATUS_COMPL))
                        *accl |= BIT(ACCL_TYPE(addr));
        }
}
void rpmh_rsc_debug_channel_busy(struct rsc_drv *drv)
{
        u32 event_sts, ctrl_sts;
        u32 chn_update, chn_busy, chn_en;
        u32 seq_busy, seq_pc;

        pr_err("RSC:%s\n", drv->name);

        event_sts = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_TCS_COMPLETE]);
        ctrl_sts = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_TCS_TRIGGER]);
        chn_update = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_UPDATE]);
        chn_busy = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_BUSY]);
        chn_en = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_EN]);
        seq_busy = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_SEQ_BUSY]);
        seq_pc = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_SEQ_PC]);

        pr_err("event sts: 0x%x ctrl_sts: 0x%x\n", event_sts, ctrl_sts);
        pr_err("chn_update: 0x%x chn_busy: 0x%x chn_en: 0x%x\n", chn_update, chn_busy, chn_en);
        pr_err("seq_busy: 0x%x seq_pc: 0x%x\n", seq_busy, seq_pc);

        crm_dump_regs("cam_crm");
        crm_dump_drv_regs("cam_crm", drv->id);
}
void rpmh_rsc_debug(struct rsc_drv *drv, struct completion *compl)
{
        struct irq_data *rsc_irq_data = irq_get_irq_data(drv->irq);
        bool gic_irq_sts, aoss_irq_sts = false;
        int i;
        int busy = 0;
        unsigned long accl = 0;
        char str[20] = "";

        pr_warn("RSC:%s\n", drv->name);

        for (i = 0; i < drv->num_tcs; i++) {
                if (!test_bit(i, drv->tcs_in_use))
                        continue;
                busy++;
                print_tcs_info(drv, i, &accl, &aoss_irq_sts);
        }

        if (!rsc_irq_data) {
                pr_err("No IRQ data for RSC:%s\n", drv->name);
                return;
        }

        irq_get_irqchip_state(drv->irq, IRQCHIP_STATE_PENDING, &gic_irq_sts);
        pr_warn("HW IRQ %lu is %s at GIC\n", rsc_irq_data->hwirq,
                gic_irq_sts ? "PENDING" : "NOT PENDING");
        pr_warn("Completion is %s to finish\n",
                completion_done(compl) ? "PENDING" : "NOT PENDING");

        for_each_set_bit(i, &accl, ARRAY_SIZE(accl_str)) {
                strlcat(str, accl_str[i], sizeof(str));
                strlcat(str, " ", sizeof(str));
        }

        if ((busy && !gic_irq_sts) || !aoss_irq_sts)
                pr_warn("ERROR:Accelerator(s) { %s } at AOSS did not respond\n",
                        str);
        else if (gic_irq_sts)
                pr_warn("ERROR:Possible lockup in Linux\n");

        /* Show fast path status, if the TCS is busy */
        for (i = 0; i < MAX_CHANNEL; i++) {
                if (!drv->ch[i].initialized)
                        continue;
                if (drv->ch[i].tcs[FAST_PATH_TCS].num_tcs) {
                        int tcs_id = drv->ch[i].tcs[FAST_PATH_TCS].offset;
                        bool sts = read_tcs_reg(drv,
                                                drv->regs[RSC_DRV_STATUS],
                                                tcs_id);

                        if (!sts) {
                                pr_err("Fast-path TCS information:\n");
                                print_tcs_info(drv, tcs_id, &accl, &aoss_irq_sts);
                        }
                }
        }

        /*
         * The TCS(s) are busy waiting, we have no way to recover from this.
         * If this debug function is called, we assume it's because a timeout
         * has happened.
         * Crash and report.
         */
        BUG_ON(busy);
}
/**
 * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
 * @drv: The controller
 *
 * Checks if any of the AMCs are busy in handling ACTIVE sets.
 * This is called from the last CPU powering down before flushing
 * SLEEP and WAKE sets. If AMCs are busy, the controller cannot enter
 * power collapse, so deny the last CPU's PM notification.
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return:
 * * False - AMCs are idle
 * * True - AMCs are busy
 */
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
        int i;
        struct tcs_group *tcs;
        unsigned long set;
        unsigned long max;

        for (i = 0; i < MAX_CHANNEL; i++) {
                if (!drv->ch[i].initialized)
                        continue;

                tcs = &drv->ch[i].tcs[ACTIVE_TCS];
                /*
                 * If we made an active request on a RSC that does not have a
                 * dedicated TCS for active state use, then the re-purposed
                 * wake TCSes must also be checked, because wake TCSes carry
                 * the active requests in that case.
                 */
                if (!tcs->num_tcs)
                        tcs = &drv->ch[i].tcs[WAKE_TCS];

                max = tcs->offset + tcs->num_tcs;
                set = find_next_bit(drv->tcs_in_use, max, tcs->offset);
                if (set < max)
                        return true;

                /* Check if there is a pending fast-path transaction */
                tcs = &drv->ch[i].tcs[FAST_PATH_TCS];
                if (tcs->num_tcs &&
                    !read_tcs_reg(drv, drv->regs[RSC_DRV_STATUS], tcs->offset))
                        return true;
        }

        return false;
}
/**
 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 * @nfb: Pointer to the notifier block in struct rsc_drv.
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v: Unused
 *
 * This function is given to cpu_pm_register_notifier so we can be informed
 * about when CPUs go down. When all CPUs go down we know no more active
 * transfers will be started so we write sleep/wake sets. This function gets
 * called from cpuidle code paths and also at system suspend time.
 *
 * If it's the last CPU going down and the AMCs are not busy, this writes
 * the cached sleep and wake messages to the TCSes. The firmware then takes
 * care of triggering them when entering deepest low power modes.
 *
 * Return: See cpu_pm_register_notifier()
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
                                    unsigned long action, void *v)
{
        struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
        int ret = NOTIFY_OK;
        int cpus_in_pm, ch;

        switch (action) {
        case CPU_PM_ENTER:
                cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
                /*
                 * NOTE: comments for num_online_cpus() point out that it's
                 * only a snapshot so we need to be careful. It should be OK
                 * for us to use, though. It's important for us not to miss
                 * if we're the last CPU going down so it would only be a
                 * problem if a CPU went offline right after we did the check
                 * AND that CPU was not idle AND that CPU was the last non-idle
                 * CPU. That can't happen. CPUs would have to come out of idle
                 * before the CPU could go offline.
                 */
                if (cpus_in_pm < num_online_cpus())
                        return NOTIFY_OK;
                break;
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                atomic_dec(&drv->cpus_in_pm);
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }

        /*
         * It's likely we're on the last CPU. Grab the drv->lock and write
         * out the sleep/wake commands to RPMH hardware. Grabbing the lock
         * means that if we race with another CPU coming up we are still
         * guaranteed to be safe. If another CPU came up just after we checked
         * and has grabbed the lock or started an active transfer then we'll
         * notice we're busy and abort. If another CPU comes up after we start
         * flushing it will be blocked from starting an active transfer until
         * we're done flushing. If another CPU starts an active transfer after
         * we release the lock we're still OK because we're no longer the last
         * CPU.
         */
        if (spin_trylock(&drv->lock)) {
                ch = rpmh_rsc_get_channel(drv);
                if (ch < 0 || rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client, ch))
                        ret = NOTIFY_BAD;
                spin_unlock(&drv->lock);
        } else {
                /* Another CPU must be up */
                return NOTIFY_OK;
        }

        if (ret == NOTIFY_BAD) {
                /* Double-check if we're here because someone else is up */
                if (cpus_in_pm < num_online_cpus())
                        ret = NOTIFY_OK;
                else
                        /* We won't be called w/ CPU_PM_ENTER_FAILED */
                        atomic_dec(&drv->cpus_in_pm);
        }

        return ret;
}
/**
 * rpmh_rsc_mode_solver_set() - Enable/disable solver mode.
 * @drv: The controller.
 * @enable: Boolean state to be set - true/false
 *
 * Return:
 * * 0 - success
 * * -EBUSY - AMCs are busy
 */
int rpmh_rsc_mode_solver_set(struct rsc_drv *drv, bool enable)
{
        int ret = -EBUSY;

        if (spin_trylock(&drv->lock)) {
                if (!enable || !rpmh_rsc_ctrlr_is_busy(drv)) {
                        drv->in_solver_mode = enable;
                        trace_rpmh_solver_set(drv, enable);
                        ipc_log_string(drv->ipc_log_ctx,
                                       "solver mode set: %d", enable);
                        ret = 0;
                }
                spin_unlock(&drv->lock);
        }

        return ret;
}
int rpmh_rsc_is_tcs_completed(struct rsc_drv *drv, int ch)
{
        u32 sts;
        int retry = 10, ret = 0;

        do {
                sts = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_TCS_COMPLETE]);
                if (ch == 0)
                        sts &= CH0_WAKE_TCS_STATUS;
                else
                        sts &= CH1_WAKE_TCS_STATUS;
                retry--;
                /*
                 * Wait till all the WAKE votes of the new channel are
                 * applied during channel switch.
                 * Maximum delay of 100 usec.
                 */
                if (!sts)
                        udelay(10);
        } while (!sts && retry);

        if (!sts) {
                ret = -EBUSY;
                goto exit;
        }

        writel_relaxed(CH_CLEAR_STATUS,
                       drv->base + drv->regs[RSC_DRV_CHN_TCS_COMPLETE]);
exit:
        trace_rpmh_switch_channel(drv, ch, ret);
        ipc_log_string(drv->ipc_log_ctx, "channel switched to: %d ret: %d", ch, ret);
        return ret;
}
/**
 * rpmh_rsc_switch_channel() - Switch to the channel
 * @drv: The controller.
 * @ch: The channel number to switch to.
 *
 * NOTE: Caller should ensure serialization before making this call.
 * Return:
 * * 0 - success
 * * -Error - Error code
 */
int rpmh_rsc_switch_channel(struct rsc_drv *drv, int ch)
{
        writel_relaxed(BIT(ch), drv->base + drv->regs[RSC_DRV_CHN_UPDATE]);

        return rpmh_rsc_is_tcs_completed(drv, ch);
}
/**
 * rpmh_rsc_drv_enable() - Enable/disable the DRV and trigger wake votes
 * @drv: The controller.
 * @enable: True to enable the DRV, false to disable it.
 *
 * NOTE: Caller should ensure serialization before making this call.
 * Return:
 * * 0 - success
 * * -Error - Error code
 */
int rpmh_rsc_drv_enable(struct rsc_drv *drv, bool enable)
{
        int ret = 0, ch;
        u32 chn_en;

        spin_lock(&drv->lock);
        chn_en = readl_relaxed(drv->base + drv->regs[RSC_DRV_CHN_EN]);
        if (chn_en == enable) {
                ret = -EINVAL;
                goto exit;
        }

        if (enable) {
                /* Start with channel 0 */
                ch = 0;
                ret = rpmh_flush(&drv->client, ch);
                if (ret)
                        goto exit;

                writel_relaxed(enable, drv->base + drv->regs[RSC_DRV_CHN_EN]);
                ret = rpmh_rsc_switch_channel(drv, ch);
                if (ret)
                        goto exit;
        } else {
                /* Select the unused channel */
                ch = rpmh_rsc_get_channel(drv);
                if (ch < 0) {
                        ret = ch;
                        goto exit;
                }

                ret = rpmh_flush(&drv->client, ch);
                if (ret)
                        goto exit;

                ret = rpmh_rsc_switch_channel(drv, ch);
                if (ret)
                        goto exit;

                writel_relaxed(0, drv->base + drv->regs[RSC_DRV_CHN_UPDATE]);
                writel_relaxed(enable, drv->base + drv->regs[RSC_DRV_CHN_EN]);
        }
exit:
        spin_unlock(&drv->lock);
        trace_rpmh_drv_enable(drv, enable, ret);
        ipc_log_string(drv->ipc_log_ctx, "drv enable: %d ret: %d", enable, ret);
        return ret;
}
/**
 * rpmh_rsc_init_fast_path() - Initialize the fast-path TCS contents
 * @drv: The controller.
 * @msg: The TCS request to populate.
 * @ch: Channel number
 *
 * Return:
 * * 0 - success
 * * -ENODEV - no fast-path TCS available
 */
int rpmh_rsc_init_fast_path(struct rsc_drv *drv, const struct tcs_request *msg, int ch)
{
        int tcs_id;

        if (!drv->ch[ch].tcs[FAST_PATH_TCS].num_tcs)
                return -ENODEV;

        tcs_id = drv->ch[ch].tcs[FAST_PATH_TCS].offset;

        /* We won't use the AMC IRQ to confirm if the TCS is free */
        enable_tcs_irq(drv, tcs_id, false);
        __tcs_buffer_write(drv, tcs_id, 0, msg);

        return 0;
}
/**
 * rpmh_rsc_update_fast_path() - Update the fast-path TCS data and trigger
 * @drv: The controller.
 * @msg: The TCS request data to be updated.
 * @mask: The update mask for elements in @msg to be sent
 * @ch: Channel number
 *
 * NOTE: Caller should ensure serialization before making this call.
 * Return:
 * * 0 - success
 * * -ENODEV - no fast-path TCS available
 */
int rpmh_rsc_update_fast_path(struct rsc_drv *drv,
                              const struct tcs_request *msg,
                              u32 mask, int ch)
{
        int i;
        u32 sts;
        int tcs_id;
        struct tcs_cmd *cmd;
        int retry = 5;

        if (!drv->ch[ch].tcs[FAST_PATH_TCS].num_tcs)
                return -ENODEV;

        tcs_id = drv->ch[ch].tcs[FAST_PATH_TCS].offset;

        /* Ensure the TCS is free before writing to the TCS */
        do {
                sts = read_tcs_reg(drv, drv->regs[RSC_DRV_STATUS], tcs_id);
                if (!sts) {
                        retry--;
                        /* Report and bail, if it took too many attempts */
                        if (!retry) {
                                pr_err("Fast-path TCS is too busy\n");
                                return -EBUSY;
                        }
                        udelay(1);
                }
        } while (!sts);

        /*
         * We only update the data, everything else remains the same.
         * The number of commands and the addresses do not change with
         * updates.
         */
        for (i = 0; i < msg->num_cmds; i++) {
                if (!(mask & BIT(i)))
                        continue;
                cmd = &msg->cmds[i];
                write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, i, cmd->data);
        }

        /* Trigger the TCS to send the request */
        __tcs_set_trigger(drv, tcs_id, true);

        return 0;
}
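
/*
 * Typical fast-path usage (sketch): rpmh_rsc_init_fast_path() programs the
 * full command set once up front; each later rpmh_rsc_update_fast_path()
 * call then rewrites only the CMD_DATA words selected by @mask (e.g.
 * mask = BIT(0) updates just cmds[0]) and re-triggers the TCS.
 */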
static int rpmh_rsc_poweroff_noirq(struct device *dev)
{
        return 0;
}

static void rpmh_rsc_tcs_irq_enable(struct rsc_drv *drv)
{
        u32 tcs_mask;
        int ch;

        for (ch = 0; ch < MAX_CHANNEL; ch++) {
                if (!drv->ch[ch].initialized)
                        continue;

                tcs_mask = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);
                tcs_mask |= drv->ch[ch].tcs[ACTIVE_TCS].mask;
                writel_relaxed(tcs_mask, drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);
        }
}

static int rpmh_rsc_restore_noirq(struct device *dev)
{
        struct rsc_drv_top *rsc_top = dev_get_drvdata(dev);
        int i;

        for (i = 0; i < rsc_top->drv_count; i++) {
                if (rsc_top->drv[i].initialized)
                        rpmh_rsc_tcs_irq_enable(&rsc_top->drv[i]);
        }

        return 0;
}
  1295. static struct rsc_drv_top *rpmh_rsc_get_top_device(const char *name)
  1296. {
  1297. struct rsc_drv_top *rsc_top;
  1298. bool rsc_dev_present = false;
  1299. list_for_each_entry(rsc_top, &rpmh_rsc_dev_list, list) {
  1300. if (!strcmp(name, rsc_top->name)) {
  1301. rsc_dev_present = true;
  1302. break;
  1303. }
  1304. }
  1305. if (!rsc_dev_present)
  1306. return ERR_PTR(-ENODEV);
  1307. return rsc_top;
  1308. }
const struct device *rpmh_rsc_get_device(const char *name, u32 drv_id)
{
	struct rsc_drv_top *rsc_top = rpmh_rsc_get_top_device(name);
	int i;

	if (IS_ERR(rsc_top))
		return ERR_PTR(-ENODEV);

	for (i = 0; i < rsc_top->drv_count; i++) {
		if (i == drv_id && rsc_top->drv[i].initialized)
			return rsc_top->drv[i].dev;
	}

	return ERR_PTR(-ENODEV);
}
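
/*
 * Illustrative lookup sketch (label and index hypothetical): a consumer
 * wanting the device for DRV index 0 of an RSC labelled "cam_rsc" would do:
 *
 *	const struct device *dev = rpmh_rsc_get_device("cam_rsc", 0);
 *
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *
 * The name must match the "label" property (or device name) recorded at
 * probe time, and the requested DRV must have been initialized.
 */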
static int rpmh_probe_channel_tcs_config(struct device_node *np,
					 struct rsc_drv *drv,
					 u32 max_tcs, u32 ncpt, int ch)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct tcs_group *tcs;
	struct drv_channel *channel = &drv->ch[ch];
	int i, ret, n, st = 0;
	u32 tcs_mask;

	n = of_property_count_u32_elems(np, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(np, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(np, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &channel->tcs[tcs_cfg[i].type];
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << (st + drv->num_tcs);
		tcs->offset = st + drv->num_tcs;
		st += tcs->num_tcs;
	}

	/* Enable the active TCS to send requests immediately */
	tcs_mask = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);
	tcs_mask |= drv->ch[ch].tcs[ACTIVE_TCS].mask;
	writel_relaxed(tcs_mask, drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);

	channel->drv = drv;
	channel->initialized = true;
	drv->num_tcs += st;

	return 0;
}
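
/*
 * For reference, "qcom,tcs-config" is parsed above as one <type count> pair
 * per TCS type (TCS_TYPE_NR pairs in total). A hypothetical channel node
 * (counts illustrative, macros assumed to come from the DT binding headers)
 * might look like:
 *
 *	channel@0 {
 *		qcom,tcs-config = <ACTIVE_TCS  2>, <SLEEP_TCS   3>,
 *				  <WAKE_TCS   3>, <CONTROL_TCS 0>;
 *	};
 *
 * Each non-empty, non-CONTROL group is packed into the DRV's TCS space in
 * declaration order, which is how tcs->mask and tcs->offset are derived.
 */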
static int rpmh_probe_tcs_config(struct rsc_drv *drv)
{
	struct device_node *cn, *np = drv->dev->of_node;
	int ch = 0, ret;
	u32 offset, config;
	u32 max_tcs, ncpt;

	ret = of_property_read_u32(np, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = drv->base + offset;

	config = readl_relaxed(drv->base + drv->regs[DRV_PRNT_CHLD_CONFIG]);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	for_each_child_of_node(np, cn) {
		if (!of_node_name_eq(cn, "channel"))
			continue;
		ret = rpmh_probe_channel_tcs_config(cn, drv, max_tcs, ncpt, ch);
		if (ret)
			return ret;
		ch++;
	}

	drv->num_channels = ch;

	return 0;
}
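
/*
 * Decode sketch (register value hypothetical, and assuming the usual
 * DRV_NUM_TCS_MASK == 0x3F / DRV_NUM_TCS_SHIFT == 6 definitions): each
 * DRV's TCS count occupies its own 6-bit field of DRV_PRNT_CHLD_CONFIG,
 * so the masking above reduces to a shift-and-mask per DRV id:
 *
 *	config  = 0x8A;				// DRV0: 10 TCSes, DRV1: 2
 *	max_tcs = (config >> (6 * 1)) & 0x3F;	// == 2 for drv->id == 1
 *
 * ncpt (commands per TCS) is extracted the same way from its own field via
 * DRV_NCPT_MASK and DRV_NCPT_SHIFT.
 */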
static int rpmh_rsc_pd_cb(struct notifier_block *nb,
			  unsigned long action, void *data)
{
	struct rsc_drv *drv = container_of(nb, struct rsc_drv, genpd_nb);
	int ch;

	if (action != GENPD_NOTIFY_PRE_OFF)
		return NOTIFY_OK;

	ch = rpmh_rsc_get_channel(drv);

	/*
	 * No locking needed here, as domain on/off are serialized.
	 * Returning NOTIFY_BAD vetoes the pending domain power-off.
	 */
	if (ch < 0 || rpmh_rsc_ctrlr_is_busy(drv) || _rpmh_flush(&drv->client, ch))
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static int rpmh_rsc_pd_attach(struct rsc_drv *drv)
{
	int ret;

	pm_runtime_enable(&drv->pdev->dev);
	ret = dev_pm_domain_attach(&drv->pdev->dev, false);
	if (ret)
		return ret;

	drv->genpd_nb.notifier_call = rpmh_rsc_pd_cb;

	return dev_pm_genpd_add_notifier(&drv->pdev->dev, &drv->genpd_nb);
}
static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *np, *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	struct rsc_drv_top *rsc_top;
	int ret, irq;
	u32 rsc_id, major_ver, minor_ver, solver_config;
	u32 i, j, drv_count;
	const char *name;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
				ret);
		return ret;
	}

	rpmh_standalone = cmd_db_is_standalone();
	if (rpmh_standalone)
		dev_info(&pdev->dev, "RPMH is running in standalone mode.\n");

	rsc_top = devm_kzalloc(&pdev->dev, sizeof(*rsc_top), GFP_KERNEL);
	if (!rsc_top)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-count", &drv_count);
	if (ret)
		return ret;

	drv = devm_kcalloc(&pdev->dev, drv_count, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	name = of_get_property(dn, "label", NULL);
	if (!name)
		name = dev_name(&pdev->dev);

	rsc_top->drv_count = drv_count;
	rsc_top->drv = drv;
	rsc_top->dev = &pdev->dev;
	scnprintf(rsc_top->name, sizeof(rsc_top->name), "%s", name);

	for_each_child_of_node(dn, np) {
		struct device *drv_dev;

		if (!of_node_name_eq(np, "drv"))
			continue;

		ret = of_property_read_u32(np, "qcom,drv-id", &i);
		if (ret)
			return ret;

		scnprintf(drv[i].name, sizeof(drv[i].name), "%s-drv-%d", name, i);
		drv[i].base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(drv[i].base))
			return PTR_ERR(drv[i].base);

		drv_dev = kzalloc(sizeof(*drv_dev), GFP_KERNEL);
		if (!drv_dev)
			return -ENOMEM;

		drv[i].id = i;
		drv[i].pdev = pdev;
		drv[i].dev = drv_dev;
		drv_dev->parent = &pdev->dev;
		drv_dev->of_node = np;
		dev_set_name(drv_dev, "%s:%pOFn%d", dev_name(drv_dev->parent), np, i);

		ret = device_register(drv_dev);
		if (ret)
			return ret;

		rsc_id = readl_relaxed(drv[i].base + RSC_DRV_ID);
		major_ver = rsc_id & (MAJOR_VER_MASK << MAJOR_VER_SHIFT);
		major_ver >>= MAJOR_VER_SHIFT;
		minor_ver = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
		minor_ver >>= MINOR_VER_SHIFT;

		if (major_ver >= 3 && minor_ver >= 0)
			drv[i].regs = rpmh_rsc_reg_offsets_ver_3_0;
		else
			drv[i].regs = rpmh_rsc_reg_offsets_ver_2_7;

		ret = rpmh_probe_tcs_config(&drv[i]);
		if (ret)
			return ret;

		dev_set_drvdata(drv_dev, &drv[i]);
		drv[i].initialized = true;
	}

	for (i = 0; i < drv_count; i++) {
		if (!drv[i].initialized)
			continue;
		/*
		 * CPU PM notifications are not required for controllers that
		 * support 'HW solver' mode, where the controller can
		 * autonomously execute low-power modes on power down.
		 */
		solver_config = readl_relaxed(drv[i].base +
					      drv[i].regs[DRV_SOLVER_CONFIG]);
		solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
		solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;

		if (of_find_property(dn, "power-domains", NULL)) {
			ret = rpmh_rsc_pd_attach(&drv[i]);
			if (ret)
				return ret;
		} else if (!solver_config &&
			   !of_find_property(dn, "qcom,hw-channel", NULL)) {
			drv[i].rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
			cpu_pm_register_notifier(&drv[i].rsc_pm);
		} else if (solver_config) {
			drv[i].client.flags = SOLVER_PRESENT;
		} else {
			/*
			 * Requests for HW-channel TCSes must be either
			 * RPMH_SLEEP_STATE or RPMH_WAKE_ONLY_STATE.
			 *
			 * Assume 'solver' state, which does nothing except
			 * disallow RPMH_ACTIVE_ONLY_STATE requests.
			 */
			drv[i].client.flags = SOLVER_PRESENT | HW_CHANNEL_PRESENT;
			drv[i].client.in_solver_mode = true;
			drv[i].in_solver_mode = true;
			drv[i].regs = rpmh_rsc_reg_offsets_ver_3_0_hw_channel;
		}

		spin_lock_init(&drv[i].lock);
		init_waitqueue_head(&drv[i].tcs_wait);
		bitmap_zero(drv[i].tcs_in_use, MAX_TCS_NR);

		drv[i].client.non_batch_cache = devm_kcalloc(&pdev->dev, CMD_DB_MAX_RESOURCES,
							     sizeof(struct cache_req), GFP_KERNEL);
		if (!drv[i].client.non_batch_cache)
			return -ENOMEM;

		for (j = 0; j < CMD_DB_MAX_RESOURCES; j++)
			INIT_LIST_HEAD(&drv[i].client.non_batch_cache[j].list);

		irq = platform_get_irq(pdev, drv[i].id);
		if (irq < 0)
			return irq;

		drv[i].irq = irq;

		ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
				       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
				       drv[i].name, &drv[i]);
		if (ret)
			return ret;

		spin_lock_init(&drv[i].client.cache_lock);
		drv[i].ipc_log_ctx = ipc_log_context_create(RSC_DRV_IPC_LOG_SIZE,
							    drv[i].name, 0);

		if (__rsc_count < MAX_RSC_COUNT)
			__rsc_drv[__rsc_count++] = &drv[i];

		ret = devm_of_platform_populate(drv[i].dev);
		if (ret)
			return ret;
	}

	INIT_LIST_HEAD(&rsc_top->list);
	list_add_tail(&rsc_top->list, &rpmh_rsc_dev_list);
	dev_set_drvdata(&pdev->dev, rsc_top);

	return devm_of_platform_populate(&pdev->dev);
}
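
/*
 * Illustrative device-tree sketch (node names and values hypothetical) of
 * the shape this probe expects: a "qcom,rpmh-rsc" node carrying a label and
 * a DRV count, one "drv" child per DRV, and "channel" children below those:
 *
 *	apps_rsc: rsc@17a00000 {
 *		compatible = "qcom,rpmh-rsc";
 *		label = "apps_rsc";
 *		qcom,drv-count = <1>;
 *
 *		drv@0 {
 *			qcom,drv-id = <0>;
 *			qcom,tcs-offset = <0xd00>;
 *
 *			channel@0 {
 *				qcom,tcs-config = <... as in the channel
 *						   example above ...>;
 *			};
 *		};
 *	};
 */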
static const struct dev_pm_ops rpmh_rsc_dev_pm_ops = {
	.poweroff_noirq = rpmh_rsc_poweroff_noirq,
	.restore_noirq = rpmh_rsc_restore_noirq,
};

static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmh_drv_match);

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		.name = "rpmh",
		.of_match_table = rpmh_drv_match,
		.pm = &rpmh_rsc_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");