/* cpr.c — Qualcomm CPR (Core Power Reduction) power-domain driver. */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2019, Linaro Limited
  5. */
  6. #include <linux/module.h>
  7. #include <linux/err.h>
  8. #include <linux/debugfs.h>
  9. #include <linux/string.h>
  10. #include <linux/kernel.h>
  11. #include <linux/list.h>
  12. #include <linux/init.h>
  13. #include <linux/io.h>
  14. #include <linux/bitops.h>
  15. #include <linux/slab.h>
  16. #include <linux/of.h>
  17. #include <linux/of_device.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/pm_domain.h>
  20. #include <linux/pm_opp.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/regmap.h>
  23. #include <linux/mfd/syscon.h>
  24. #include <linux/regulator/consumer.h>
  25. #include <linux/clk.h>
  26. #include <linux/nvmem-consumer.h>
/* Register Offsets for RB-CPR and Bit Definitions */

/* RBCPR Version Register */
#define REG_RBCPR_VERSION		0
#define RBCPR_VER_2			0x02
#define FLAGS_IGNORE_1ST_IRQ_STATUS	BIT(0)

/* RBCPR Gate Count and Target Registers */
#define REG_RBCPR_GCNT_TARGET(n)	(0x60 + 4 * (n))

#define RBCPR_GCNT_TARGET_TARGET_SHIFT	0
#define RBCPR_GCNT_TARGET_TARGET_MASK	GENMASK(11, 0)
#define RBCPR_GCNT_TARGET_GCNT_SHIFT	12
#define RBCPR_GCNT_TARGET_GCNT_MASK	GENMASK(9, 0)

/* RBCPR Timer Control */
#define REG_RBCPR_TIMER_INTERVAL	0x44
#define REG_RBIF_TIMER_ADJUST		0x4c

#define RBIF_TIMER_ADJ_CONS_UP_MASK	GENMASK(3, 0)
#define RBIF_TIMER_ADJ_CONS_UP_SHIFT	0
#define RBIF_TIMER_ADJ_CONS_DOWN_MASK	GENMASK(3, 0)
#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT	4
#define RBIF_TIMER_ADJ_CLAMP_INT_MASK	GENMASK(7, 0)
#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT	8

/* RBCPR Config Register */
#define REG_RBIF_LIMIT			0x48
#define RBIF_LIMIT_CEILING_MASK		GENMASK(5, 0)
#define RBIF_LIMIT_CEILING_SHIFT	6
#define RBIF_LIMIT_FLOOR_BITS		6
#define RBIF_LIMIT_FLOOR_MASK		GENMASK(5, 0)

#define RBIF_LIMIT_CEILING_DEFAULT	RBIF_LIMIT_CEILING_MASK
#define RBIF_LIMIT_FLOOR_DEFAULT	0

#define REG_RBIF_SW_VLEVEL		0x94
#define RBIF_SW_VLEVEL_DEFAULT		0x20

#define REG_RBCPR_STEP_QUOT		0x80
#define RBCPR_STEP_QUOT_STEPQUOT_MASK	GENMASK(7, 0)
#define RBCPR_STEP_QUOT_IDLE_CLK_MASK	GENMASK(3, 0)
#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT	8

/* RBCPR Control Register */
#define REG_RBCPR_CTL			0x90

#define RBCPR_CTL_LOOP_EN			BIT(0)
#define RBCPR_CTL_TIMER_EN			BIT(3)
#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN		BIT(5)
#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN	BIT(6)
#define RBCPR_CTL_COUNT_MODE			BIT(10)
#define RBCPR_CTL_UP_THRESHOLD_MASK	GENMASK(3, 0)
#define RBCPR_CTL_UP_THRESHOLD_SHIFT	24
#define RBCPR_CTL_DN_THRESHOLD_MASK	GENMASK(3, 0)
#define RBCPR_CTL_DN_THRESHOLD_SHIFT	28

/* RBCPR Ack/Nack Response */
#define REG_RBIF_CONT_ACK_CMD		0x98
#define REG_RBIF_CONT_NACK_CMD		0x9c

/* RBCPR Result status Register */
#define REG_RBCPR_RESULT_0		0xa0

#define RBCPR_RESULT0_BUSY_SHIFT	19
#define RBCPR_RESULT0_BUSY_MASK		BIT(RBCPR_RESULT0_BUSY_SHIFT)
#define RBCPR_RESULT0_ERROR_LT0_SHIFT	18
#define RBCPR_RESULT0_ERROR_SHIFT	6
#define RBCPR_RESULT0_ERROR_MASK	GENMASK(11, 0)
#define RBCPR_RESULT0_ERROR_STEPS_SHIFT	2
#define RBCPR_RESULT0_ERROR_STEPS_MASK	GENMASK(3, 0)
#define RBCPR_RESULT0_STEP_UP_SHIFT	1

/* RBCPR Interrupt Control Register */
#define REG_RBIF_IRQ_EN(n)		(0x100 + 4 * (n))
#define REG_RBIF_IRQ_CLEAR		0x110
#define REG_RBIF_IRQ_STATUS		0x114

#define CPR_INT_DONE		BIT(0)
#define CPR_INT_MIN		BIT(1)
#define CPR_INT_DOWN		BIT(2)
#define CPR_INT_MID		BIT(3)
#define CPR_INT_UP		BIT(4)
#define CPR_INT_MAX		BIT(5)
#define CPR_INT_CLAMP		BIT(6)
#define CPR_INT_ALL	(CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
			CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
#define CPR_INT_DEFAULT	(CPR_INT_UP | CPR_INT_DOWN)

#define CPR_NUM_RING_OSC	8

/* CPR eFuse parameters */
#define CPR_FUSE_TARGET_QUOT_BITS_MASK	GENMASK(11, 0)

#define CPR_FUSE_MIN_QUOT_DIFF		50

#define FUSE_REVISION_UNKNOWN		(-1)
/* Direction of a pending APC supply-voltage change. */
enum voltage_change_dir {
	NO_CHANGE,
	DOWN,
	UP,
};

/* Names of the nvmem cells holding one fuse corner's eFuse values. */
struct cpr_fuse {
	char *ring_osc;		/* ring-oscillator select fuse */
	char *init_voltage;	/* initial (open-loop) voltage fuse */
	char *quotient;		/* target quotient fuse */
	char *quotient_offset;	/* quotient offset fuse */
};

/* Per-fuse-corner characterization data supplied by the SoC descriptor. */
struct fuse_corner_data {
	int ref_uV;		/* base voltage the fused step count is applied to */
	int max_uV;
	int min_uV;
	int max_volt_scale;
	int max_quot_scale;
	/* fuse quot */
	int quot_offset;
	int quot_scale;
	int quot_adjust;
	/* fuse quot_offset */
	int quot_offset_scale;
	int quot_offset_adjust;
};

/* eFuse layout/scaling description covering all fuse corners. */
struct cpr_fuses {
	int init_voltage_step;	/* uV represented by one init-voltage fuse step */
	int init_voltage_width;	/* bit width of the init-voltage fuse field */
	struct fuse_corner_data *fuse_corner_data;
};

/* Association of an operating point (frequency) with its fuse corner. */
struct corner_data {
	unsigned int fuse_corner;
	unsigned long freq;
};
/* Static per-SoC CPR configuration (timers, thresholds, fuse layout). */
struct cpr_desc {
	unsigned int num_fuse_corners;
	int min_diff_quot;
	int *step_quot;			/* per-ring-oscillator step quotient table */
	unsigned int timer_delay_us;	/* measurement interval */
	unsigned int timer_cons_up;	/* consecutive-up count */
	unsigned int timer_cons_down;	/* consecutive-down count */
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int idle_clocks;
	unsigned int gcnt_us;		/* gate count expressed in microseconds */
	unsigned int vdd_apc_step_up_limit;	/* max regulator steps per UP event */
	unsigned int vdd_apc_step_down_limit;	/* max regulator steps per DOWN event */
	unsigned int clamp_timer_interval;
	struct cpr_fuses cpr_fuses;
	bool reduce_to_fuse_uV;
	bool reduce_to_corner_uV;
};

/* ACC (TCSR) register sequences associated with the fuse corners. */
struct acc_desc {
	unsigned int enable_reg;
	u32 enable_mask;
	struct reg_sequence *config;
	struct reg_sequence *settings;
	int num_regs_per_fuse;	/* reg_sequence entries consumed per fuse corner */
};

/* Bundle of the CPR and ACC descriptors for one SoC. */
struct cpr_acc_desc {
	const struct cpr_desc *cpr_desc;
	const struct acc_desc *acc_desc;
};
/* Runtime state for one fuse corner. */
struct fuse_corner {
	int min_uV;
	int max_uV;
	int uV;			/* decoded open-loop voltage, clamped to [min, max] */
	int quot;		/* scaled + adjusted target quotient */
	int step_quot;
	const struct reg_sequence *accs;	/* ACC writes applied for this corner */
	int num_accs;
	unsigned long max_freq;
	u8 ring_osc_idx;	/* which of the CPR_NUM_RING_OSC ROs this corner uses */
};

/* Runtime state for one performance corner (one per performance state). */
struct corner {
	int min_uV;
	int max_uV;
	int uV;
	int last_uV;		/* last voltage the closed loop settled on */
	int quot_adjust;	/* subtracted from the fuse quotient when programmed */
	u32 save_ctl;		/* saved RBCPR_CTL auto ack/nack bits */
	u32 save_irq;		/* saved interrupt-enable mask */
	unsigned long freq;
	struct fuse_corner *fuse_corner;
};

/* Driver instance state. */
struct cpr_drv {
	unsigned int num_corners;
	unsigned int ref_clk_khz;
	struct generic_pm_domain pd;
	struct device *dev;
	struct device *attached_cpu_dev;
	struct mutex lock;		/* serializes HW access and corner state */
	void __iomem *base;		/* RBCPR register block */
	struct corner *corner;		/* currently active corner, or NULL */
	struct regulator *vdd_apc;
	struct clk *cpu_clk;
	struct regmap *tcsr;		/* ACC register space; may be NULL */
	bool loop_disabled;		/* true disables closed-loop operation */
	u32 gcnt;			/* gate count, pre-shifted into position */
	unsigned long flags;		/* FLAGS_IGNORE_1ST_IRQ_STATUS etc. */
	struct fuse_corner *fuse_corners;
	struct corner *corners;
	const struct cpr_desc *desc;
	const struct acc_desc *acc_desc;
	const struct cpr_fuse *cpr_fuses;
	struct dentry *debugfs;
};
  211. static bool cpr_is_allowed(struct cpr_drv *drv)
  212. {
  213. return !drv->loop_disabled;
  214. }
/* Write @value to the CPR register at @offset (relaxed MMIO, no barrier). */
static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
{
	writel_relaxed(value, drv->base + offset);
}
/* Read the CPR register at @offset (relaxed MMIO, no barrier). */
static u32 cpr_read(struct cpr_drv *drv, u32 offset)
{
	return readl_relaxed(drv->base + offset);
}
  223. static void
  224. cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
  225. {
  226. u32 val;
  227. val = readl_relaxed(drv->base + offset);
  228. val &= ~mask;
  229. val |= value & mask;
  230. writel_relaxed(val, drv->base + offset);
  231. }
/* Clear every pending CPR interrupt flag. */
static void cpr_irq_clr(struct cpr_drv *drv)
{
	cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
}
/* Clear pending interrupts and NACK the last measurement result. */
static void cpr_irq_clr_nack(struct cpr_drv *drv)
{
	cpr_irq_clr(drv);
	cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
}
/* Clear pending interrupts and ACK the last measurement result. */
static void cpr_irq_clr_ack(struct cpr_drv *drv)
{
	cpr_irq_clr(drv);
	cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
}
/* Program the interrupt-enable mask (line 0) with @int_bits. */
static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
{
	cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
}
/* Masked update of the RBCPR control register. */
static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
{
	cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
}
/*
 * Re-arm the CPR loop for @corner: program the consecutive up/down
 * counts, restore the corner's saved auto ack/nack control bits and
 * interrupt mask, then set LOOP_EN — but only when closed-loop
 * operation is allowed and the corner has voltage headroom
 * (max_uV > min_uV); otherwise the loop stays off.
 */
static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
{
	u32 val, mask;
	const struct cpr_desc *desc = drv->desc;

	/* Program Consecutive Up & Down */
	val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
	val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
	mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
	cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);

	/* Restore the per-corner auto ack/nack bits saved by cpr_corner_save() */
	cpr_masked_write(drv, REG_RBCPR_CTL,
			 RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
			 RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
			 corner->save_ctl);
	cpr_irq_set(drv, corner->save_irq);

	if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
		val = RBCPR_CTL_LOOP_EN;
	else
		val = 0;
	cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
}
/*
 * Quiesce the CPR loop: mask all interrupts, drop the auto ack/nack
 * control bits, zero the consecutive up/down counts, flush any pending
 * handshake with both an ACK and a NACK, and finally clear LOOP_EN.
 */
static void cpr_ctl_disable(struct cpr_drv *drv)
{
	cpr_irq_set(drv, 0);
	cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
		       RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
	cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
			 RBIF_TIMER_ADJ_CONS_UP_MASK |
			 RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
	cpr_irq_clr(drv);
	cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
	cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
	cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
}
  287. static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
  288. {
  289. u32 reg_val;
  290. reg_val = cpr_read(drv, REG_RBCPR_CTL);
  291. return reg_val & RBCPR_CTL_LOOP_EN;
  292. }
  293. static bool cpr_ctl_is_busy(struct cpr_drv *drv)
  294. {
  295. u32 reg_val;
  296. reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
  297. return reg_val & RBCPR_RESULT0_BUSY_MASK;
  298. }
/* Snapshot the control and interrupt-enable registers into @corner so
 * cpr_corner_restore()/cpr_ctl_enable() can re-apply them later. */
static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
{
	corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
	corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
}
/*
 * Reprogram the CPR block for @corner: step quotient + idle clocks,
 * the (adjusted) target quotient and gate count on the corner's ring
 * oscillator, and the previously saved control/interrupt-mask state.
 */
static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
{
	u32 gcnt, ctl, irq, ro_sel, step_quot;
	struct fuse_corner *fuse = corner->fuse_corner;
	const struct cpr_desc *desc = drv->desc;
	int i;

	ro_sel = fuse->ring_osc_idx;
	/* drv->gcnt already holds the gate count shifted into position */
	gcnt = drv->gcnt;
	gcnt |= fuse->quot - corner->quot_adjust;

	/* Program the step quotient and idle clocks */
	step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
	step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
	cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);

	/* Clear the target quotient value and gate count of all ROs */
	for (i = 0; i < CPR_NUM_RING_OSC; i++)
		cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);

	/* Only the corner's own ring oscillator gets a non-zero target */
	cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);

	ctl = corner->save_ctl;
	cpr_write(drv, REG_RBCPR_CTL, ctl);
	irq = corner->save_irq;
	cpr_irq_set(drv, irq);

	dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
		ctl, irq);
}
  328. static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
  329. struct fuse_corner *end)
  330. {
  331. if (f == end)
  332. return;
  333. if (f < end) {
  334. for (f += 1; f <= end; f++)
  335. regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
  336. } else {
  337. for (f -= 1; f >= end; f--)
  338. regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
  339. }
  340. }
/*
 * Hook run before the APC supply is changed.  When stepping DOWN and a
 * TCSR regmap is present, walk the ACC settings from the current fuse
 * corner to the target one before the voltage drops.  Always returns 0.
 */
static int cpr_pre_voltage(struct cpr_drv *drv,
			   struct fuse_corner *fuse_corner,
			   enum voltage_change_dir dir)
{
	struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;

	if (drv->tcsr && dir == DOWN)
		cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);

	return 0;
}
/*
 * Hook run after the APC supply is changed.  When stepping UP and a
 * TCSR regmap is present, walk the ACC settings from the current fuse
 * corner to the target one now that the voltage is high enough.
 * Always returns 0.
 */
static int cpr_post_voltage(struct cpr_drv *drv,
			    struct fuse_corner *fuse_corner,
			    enum voltage_change_dir dir)
{
	struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;

	if (drv->tcsr && dir == UP)
		cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);

	return 0;
}
  359. static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
  360. int new_uV, enum voltage_change_dir dir)
  361. {
  362. int ret;
  363. struct fuse_corner *fuse_corner = corner->fuse_corner;
  364. ret = cpr_pre_voltage(drv, fuse_corner, dir);
  365. if (ret)
  366. return ret;
  367. ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
  368. if (ret) {
  369. dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
  370. new_uV);
  371. return ret;
  372. }
  373. ret = cpr_post_voltage(drv, fuse_corner, dir);
  374. if (ret)
  375. return ret;
  376. return 0;
  377. }
  378. static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
  379. {
  380. return drv->corner ? drv->corner - drv->corners + 1 : 0;
  381. }
/*
 * React to an UP or DOWN interrupt: read the error-step count from
 * RESULT_0, clamp it by the per-step limits, move the APC voltage
 * within the corner's [min_uV, max_uV] window, then ack or nack the
 * hardware.  Returns 0 on success or a negative errno.  Caller holds
 * drv->lock (see cpr_irq_handler()).
 */
static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
{
	u32 val, error_steps, reg_mask;
	int last_uV, new_uV, step_uV, ret;
	struct corner *corner;
	const struct cpr_desc *desc = drv->desc;

	if (dir != UP && dir != DOWN)
		return 0;

	step_uV = regulator_get_linear_step(drv->vdd_apc);
	if (!step_uV)
		return -EINVAL;

	corner = drv->corner;

	val = cpr_read(drv, REG_RBCPR_RESULT_0);

	error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
	error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
	last_uV = corner->last_uV;

	if (dir == UP) {
		if (desc->clamp_timer_interval &&
		    error_steps < desc->up_threshold) {
			/*
			 * Handle the case where another measurement started
			 * after the interrupt was triggered due to a core
			 * exiting from power collapse.
			 */
			error_steps = max(desc->up_threshold,
					  desc->vdd_apc_step_up_limit);
		}

		/* Already at the corner ceiling: nack and stop UP events */
		if (last_uV >= corner->max_uV) {
			cpr_irq_clr_nack(drv);

			/* Maximize the UP threshold */
			reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
			reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
			val = reg_mask;
			cpr_ctl_modify(drv, reg_mask, val);

			/* Disable UP interrupt */
			cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);

			return 0;
		}

		if (error_steps > desc->vdd_apc_step_up_limit)
			error_steps = desc->vdd_apc_step_up_limit;

		/* Calculate new voltage */
		new_uV = last_uV + error_steps * step_uV;
		new_uV = min(new_uV, corner->max_uV);

		dev_dbg(drv->dev,
			"UP: -> new_uV: %d last_uV: %d perf state: %u\n",
			new_uV, last_uV, cpr_get_cur_perf_state(drv));
	} else {
		if (desc->clamp_timer_interval &&
		    error_steps < desc->down_threshold) {
			/*
			 * Handle the case where another measurement started
			 * after the interrupt was triggered due to a core
			 * exiting from power collapse.
			 */
			error_steps = max(desc->down_threshold,
					  desc->vdd_apc_step_down_limit);
		}

		/* Already at the corner floor: nack and stop DOWN events */
		if (last_uV <= corner->min_uV) {
			cpr_irq_clr_nack(drv);

			/* Enable auto nack down */
			reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
			val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
			cpr_ctl_modify(drv, reg_mask, val);

			/* Disable DOWN interrupt */
			cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);

			return 0;
		}

		if (error_steps > desc->vdd_apc_step_down_limit)
			error_steps = desc->vdd_apc_step_down_limit;

		/* Calculate new voltage */
		new_uV = last_uV - error_steps * step_uV;
		new_uV = max(new_uV, corner->min_uV);

		dev_dbg(drv->dev,
			"DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
			new_uV, last_uV, cpr_get_cur_perf_state(drv));
	}

	ret = cpr_scale_voltage(drv, corner, new_uV, dir);
	if (ret) {
		cpr_irq_clr_nack(drv);
		return ret;
	}
	drv->corner->last_uV = new_uV;

	if (dir == UP) {
		/* Disable auto nack down */
		reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
		val = 0;
	} else {
		/* Restore default threshold for UP */
		reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
		reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
		val = desc->up_threshold;
		val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
	}

	cpr_ctl_modify(drv, reg_mask, val);

	/* Re-enable default interrupts */
	cpr_irq_set(drv, CPR_INT_DEFAULT);

	/* Ack */
	cpr_irq_clr_ack(drv);

	return 0;
}
/*
 * CPR interrupt handler.  Takes drv->lock (a mutex), so this is
 * presumably registered as a threaded IRQ handler — registration is
 * not visible in this part of the file; confirm at request_irq site.
 * Dispatches UP/DOWN scaling, nacks MIN/MAX, and saves the register
 * state back into the active corner.
 */
static irqreturn_t cpr_irq_handler(int irq, void *dev)
{
	struct cpr_drv *drv = dev;
	const struct cpr_desc *desc = drv->desc;
	irqreturn_t ret = IRQ_HANDLED;
	u32 val;

	mutex_lock(&drv->lock);

	val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
	/* Some IP revisions need the status register read twice */
	if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
		val = cpr_read(drv, REG_RBIF_IRQ_STATUS);

	dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);

	if (!cpr_ctl_is_enabled(drv)) {
		dev_dbg(drv->dev, "CPR is disabled\n");
		ret = IRQ_NONE;
	} else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
		dev_dbg(drv->dev, "CPR measurement is not ready\n");
	} else if (!cpr_is_allowed(drv)) {
		val = cpr_read(drv, REG_RBCPR_CTL);
		dev_err_ratelimited(drv->dev,
				    "Interrupt broken? RBCPR_CTL = %#02x\n",
				    val);
		ret = IRQ_NONE;
	} else {
		/*
		 * Following sequence of handling is as per each IRQ's
		 * priority
		 */
		if (val & CPR_INT_UP) {
			cpr_scale(drv, UP);
		} else if (val & CPR_INT_DOWN) {
			cpr_scale(drv, DOWN);
		} else if (val & CPR_INT_MIN) {
			cpr_irq_clr_nack(drv);
		} else if (val & CPR_INT_MAX) {
			cpr_irq_clr_nack(drv);
		} else if (val & CPR_INT_MID) {
			/* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
			dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
		} else {
			dev_dbg(drv->dev,
				"IRQ occurred for unknown flag (%#08x)\n", val);
		}

		/* Save register values for the corner */
		cpr_corner_save(drv, drv->corner);
	}

	mutex_unlock(&drv->lock);

	return ret;
}
/*
 * Power-on path: enable the APC regulator and, if closed-loop CPR is
 * allowed and a corner has already been selected, restore and re-arm
 * the loop for it.  Returns 0 or the regulator_enable() error.
 */
static int cpr_enable(struct cpr_drv *drv)
{
	int ret;

	ret = regulator_enable(drv->vdd_apc);
	if (ret)
		return ret;

	mutex_lock(&drv->lock);

	if (cpr_is_allowed(drv) && drv->corner) {
		cpr_irq_clr(drv);
		cpr_corner_restore(drv, drv->corner);
		cpr_ctl_enable(drv, drv->corner);
	}

	mutex_unlock(&drv->lock);

	return 0;
}
/*
 * Power-off path: quiesce the loop (if allowed), then drop the APC
 * regulator reference.  Returns the regulator_disable() result.
 */
static int cpr_disable(struct cpr_drv *drv)
{
	mutex_lock(&drv->lock);

	if (cpr_is_allowed(drv)) {
		cpr_ctl_disable(drv);
		cpr_irq_clr(drv);
	}

	mutex_unlock(&drv->lock);

	return regulator_disable(drv->vdd_apc);
}
/*
 * One-time hardware setup: program the HW ceiling/floor/vlevel
 * defaults, the gate count, measurement timer and thresholds, and seed
 * every corner's saved control/interrupt state with the initial
 * programming.  Always returns 0.
 */
static int cpr_config(struct cpr_drv *drv)
{
	int i;
	u32 val, gcnt;
	struct corner *corner;
	const struct cpr_desc *desc = drv->desc;

	/* Disable interrupt and CPR */
	cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
	cpr_write(drv, REG_RBCPR_CTL, 0);

	/* Program the default HW ceiling, floor and vlevel */
	val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
		<< RBIF_LIMIT_CEILING_SHIFT;
	val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
	cpr_write(drv, REG_RBIF_LIMIT, val);
	cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);

	/*
	 * Clear the target quotient value and gate count of all
	 * ring oscillators
	 */
	for (i = 0; i < CPR_NUM_RING_OSC; i++)
		cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);

	/* Init and save gcnt (ref-clock cycles per gcnt_us, pre-shifted) */
	gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
	gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
	gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
	drv->gcnt = gcnt;

	/* Program the delay count for the timer */
	val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
	cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
	dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
		desc->timer_delay_us);

	/* Program Consecutive Up & Down */
	val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
	val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
	val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
	cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);

	/* Program the control register */
	val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
	val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
	val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
	val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
	cpr_write(drv, REG_RBCPR_CTL, val);

	/* Seed every corner's saved state with the initial programming */
	for (i = 0; i < drv->num_corners; i++) {
		corner = &drv->corners[i];
		corner->save_ctl = val;
		corner->save_irq = CPR_INT_DEFAULT;
	}

	cpr_irq_set(drv, CPR_INT_DEFAULT);

	/* Older IP revisions need the IRQ status register read twice */
	val = cpr_read(drv, REG_RBCPR_VERSION);
	if (val <= RBCPR_VER_2)
		drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;

	return 0;
}
/*
 * genpd ->set_performance_state() callback.  Performance states are
 * 1-based: state N selects drv->corners[N - 1].  Moves the APC supply
 * (last closed-loop voltage when CPR is allowed, open-loop voltage
 * otherwise) and re-arms the loop for the new corner.  Returns 0 or a
 * negative errno.
 */
static int cpr_set_performance_state(struct generic_pm_domain *domain,
				     unsigned int state)
{
	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
	struct corner *corner, *end;
	enum voltage_change_dir dir;
	int ret = 0, new_uV;

	mutex_lock(&drv->lock);

	dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
		__func__, state, cpr_get_cur_perf_state(drv));

	/*
	 * Determine new corner we're going to.
	 * Remove one since lowest performance state is 1.
	 */
	corner = drv->corners + state - 1;
	end = &drv->corners[drv->num_corners - 1];
	if (corner > end || corner < drv->corners) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Determine direction */
	if (drv->corner > corner)
		dir = DOWN;
	else if (drv->corner < corner)
		dir = UP;
	else
		dir = NO_CHANGE;

	if (cpr_is_allowed(drv))
		new_uV = corner->last_uV;
	else
		new_uV = corner->uV;

	/* Keep the loop quiet while the supply moves */
	if (cpr_is_allowed(drv))
		cpr_ctl_disable(drv);

	ret = cpr_scale_voltage(drv, corner, new_uV, dir);
	if (ret)
		goto unlock;

	if (cpr_is_allowed(drv)) {
		cpr_irq_clr(drv);
		if (drv->corner != corner)
			cpr_corner_restore(drv, corner);
		cpr_ctl_enable(drv, corner);
	}

	drv->corner = corner;

unlock:
	mutex_unlock(&drv->lock);

	return ret;
}
/*
 * Read each fuse corner's ring-oscillator select index from its nvmem
 * cell.  Returns 0 or the first nvmem read error.
 */
static int
cpr_populate_ring_osc_idx(struct cpr_drv *drv)
{
	struct fuse_corner *fuse = drv->fuse_corners;
	struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
	const struct cpr_fuse *fuses = drv->cpr_fuses;
	u32 data;
	int ret;

	for (; fuse < end; fuse++, fuses++) {
		ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->ring_osc, &data);
		if (ret)
			return ret;
		fuse->ring_osc_idx = data;
	}

	return 0;
}
/*
 * Decode an initial-voltage eFuse value.  The fuse field is
 * sign-magnitude: the top bit of the init_voltage_width-wide value is
 * the sign, the remaining bits a step count applied to fdata->ref_uV
 * in units of init_voltage_step.  The result is rounded up to the
 * regulator's step size (@step_volt).  Returns the voltage in uV, or
 * a negative errno if the nvmem read fails.
 */
static int cpr_read_fuse_uV(const struct cpr_desc *desc,
			    const struct fuse_corner_data *fdata,
			    const char *init_v_efuse,
			    int step_volt,
			    struct cpr_drv *drv)
{
	int step_size_uV, steps, uV;
	u32 bits = 0;
	int ret;

	ret = nvmem_cell_read_variable_le_u32(drv->dev, init_v_efuse, &bits);
	if (ret)
		return ret;

	steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
	/* Not two's complement.. instead highest bit is sign bit */
	if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
		steps = -steps;

	step_size_uV = desc->cpr_fuses.init_voltage_step;

	uV = fdata->ref_uV + steps * step_size_uV;
	return DIV_ROUND_UP(uV, step_volt) * step_volt;
}
/*
 * Populate every fuse corner from eFuse data: decode and clamp the
 * open-loop voltage, scale/offset/adjust the target quotient, pick the
 * step quotient for the corner's ring oscillator, and attach the ACC
 * register sequence.  Then verify the regulator supports each corner's
 * floor and ceiling voltage.  Returns 0 or a negative errno.
 */
static int cpr_fuse_corner_init(struct cpr_drv *drv)
{
	const struct cpr_desc *desc = drv->desc;
	const struct cpr_fuse *fuses = drv->cpr_fuses;
	const struct acc_desc *acc_desc = drv->acc_desc;
	int i;
	unsigned int step_volt;
	struct fuse_corner_data *fdata;
	struct fuse_corner *fuse, *end;
	int uV;
	const struct reg_sequence *accs;
	int ret;

	accs = acc_desc->settings;

	step_volt = regulator_get_linear_step(drv->vdd_apc);
	if (!step_volt)
		return -EINVAL;

	/* Populate fuse_corner members */
	fuse = drv->fuse_corners;
	end = &fuse[desc->num_fuse_corners - 1];
	fdata = desc->cpr_fuses.fuse_corner_data;

	for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
		/*
		 * Update SoC voltages: platforms might choose a different
		 * regulators than the one used to characterize the algorithms
		 * (ie, init_voltage_step).
		 */
		fdata->min_uV = roundup(fdata->min_uV, step_volt);
		fdata->max_uV = roundup(fdata->max_uV, step_volt);

		/* Populate uV */
		uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
				      step_volt, drv);
		if (uV < 0)
			return uV;

		fuse->min_uV = fdata->min_uV;
		fuse->max_uV = fdata->max_uV;
		fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);

		if (fuse == end) {
			/*
			 * Allow the highest fuse corner's PVS voltage to
			 * define the ceiling voltage for that corner in order
			 * to support SoC's in which variable ceiling values
			 * are required.
			 */
			end->max_uV = max(end->max_uV, end->uV);
		}

		/* Populate target quotient by scaling */
		ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->quotient, &fuse->quot);
		if (ret)
			return ret;

		fuse->quot *= fdata->quot_scale;
		fuse->quot += fdata->quot_offset;
		fuse->quot += fdata->quot_adjust;
		fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];

		/* Populate acc settings */
		fuse->accs = accs;
		fuse->num_accs = acc_desc->num_regs_per_fuse;
		accs += acc_desc->num_regs_per_fuse;
	}

	/*
	 * Restrict all fuse corner PVS voltages based upon per corner
	 * ceiling and floor voltages.
	 */
	for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
		if (fuse->uV > fuse->max_uV)
			fuse->uV = fuse->max_uV;
		else if (fuse->uV < fuse->min_uV)
			fuse->uV = fuse->min_uV;

		ret = regulator_is_supported_voltage(drv->vdd_apc,
						     fuse->min_uV,
						     fuse->min_uV);
		if (!ret) {
			dev_err(drv->dev,
				"min uV: %d (fuse corner: %d) not supported by regulator\n",
				fuse->min_uV, i);
			return -EINVAL;
		}

		ret = regulator_is_supported_voltage(drv->vdd_apc,
						     fuse->max_uV,
						     fuse->max_uV);
		if (!ret) {
			dev_err(drv->dev,
				"max uV: %d (fuse corner: %d) not supported by regulator\n",
				fuse->max_uV, i);
			return -EINVAL;
		}

		dev_dbg(drv->dev,
			"fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
			i, fuse->min_uV, fuse->uV, fuse->max_uV,
			fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
	}

	return 0;
}
  783. static int cpr_calculate_scaling(const char *quot_offset,
  784. struct cpr_drv *drv,
  785. const struct fuse_corner_data *fdata,
  786. const struct corner *corner)
  787. {
  788. u32 quot_diff = 0;
  789. unsigned long freq_diff;
  790. int scaling;
  791. const struct fuse_corner *fuse, *prev_fuse;
  792. int ret;
  793. fuse = corner->fuse_corner;
  794. prev_fuse = fuse - 1;
  795. if (quot_offset) {
  796. ret = nvmem_cell_read_variable_le_u32(drv->dev, quot_offset, &quot_diff);
  797. if (ret)
  798. return ret;
  799. quot_diff *= fdata->quot_offset_scale;
  800. quot_diff += fdata->quot_offset_adjust;
  801. } else {
  802. quot_diff = fuse->quot - prev_fuse->quot;
  803. }
  804. freq_diff = fuse->max_freq - prev_fuse->max_freq;
  805. freq_diff /= 1000000; /* Convert to MHz */
  806. scaling = 1000 * quot_diff / freq_diff;
  807. return min(scaling, fdata->max_quot_scale);
  808. }
/*
 * cpr_interpolate() - open-loop voltage for a corner between fuse corners
 * @corner: virtual corner whose frequency lies at or below its fuse corner
 * @step_volt: regulator step size in uV; result is rounded up to it
 * @fdata: descriptor data providing max_volt_scale (uV/MHz cap)
 *
 * Linearly interpolates the voltage between the previous fuse corner's
 * (freq, uV) point and this fuse corner's, then caps the reduction at
 * max_volt_scale * dfreq.
 *
 * Return: interpolated voltage in uV, rounded up to a regulator step.
 */
static int cpr_interpolate(const struct corner *corner, int step_volt,
			   const struct fuse_corner_data *fdata)
{
	unsigned long f_high, f_low, f_diff;
	int uV_high, uV_low, uV;
	u64 temp, temp_limit;
	const struct fuse_corner *fuse, *prev_fuse;

	fuse = corner->fuse_corner;
	prev_fuse = fuse - 1;

	f_high = fuse->max_freq;
	f_low = prev_fuse->max_freq;
	uV_high = fuse->uV;
	uV_low = prev_fuse->uV;
	f_diff = fuse->max_freq - corner->freq;

	/*
	 * Don't interpolate in the wrong direction. This could happen
	 * if the adjusted fuse voltage overlaps with the previous fuse's
	 * adjusted voltage.
	 */
	if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
		return corner->uV;

	/* 64-bit math: f_diff (Hz) times a uV delta can exceed 32 bits. */
	temp = f_diff * (uV_high - uV_low);
	temp = div64_ul(temp, f_high - f_low);

	/*
	 * max_volt_scale has units of uV/MHz while freq values
	 * have units of Hz. Divide by 1000000 to convert the limit to uV.
	 */
	temp_limit = f_diff * fdata->max_volt_scale;
	do_div(temp_limit, 1000000);

	/* Take the smaller reduction, then snap up to a regulator step. */
	uV = uV_high - min(temp, temp_limit);
	return roundup(uV, step_volt);
}
  841. static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
  842. {
  843. struct device_node *np;
  844. unsigned int fuse_corner = 0;
  845. np = dev_pm_opp_get_of_node(opp);
  846. if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
  847. pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
  848. __func__);
  849. of_node_put(np);
  850. return fuse_corner;
  851. }
/*
 * cpr_get_opp_hz_for_req() - find the CPU frequency tied to a CPR OPP
 * @ref: CPR power-domain OPP acting as the "required-opps" target
 * @cpu_dev: CPU device whose OPP table is searched
 *
 * Walks the CPU's OPP table nodes looking for the entry whose
 * "required-opps" phandle points at @ref, and returns that entry's
 * "opp-hz" value.
 *
 * Return: frequency in Hz, or 0 if no matching OPP node is found.
 */
static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
					    struct device *cpu_dev)
{
	u64 rate = 0;
	struct device_node *ref_np;
	struct device_node *desc_np;
	struct device_node *child_np = NULL;
	struct device_node *child_req_np = NULL;

	desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
	if (!desc_np)
		return 0;

	ref_np = dev_pm_opp_get_of_node(ref);
	if (!ref_np)
		goto out_ref;

	/*
	 * Iterate the CPU OPP children; of_get_next_available_child()
	 * consumes the previous child's reference, and the stale
	 * required-opps reference is dropped at the top of each pass.
	 */
	do {
		of_node_put(child_req_np);
		child_np = of_get_next_available_child(desc_np, child_np);
		child_req_np = of_parse_phandle(child_np, "required-opps", 0);
	} while (child_np && child_req_np != ref_np);

	if (child_np && child_req_np == ref_np)
		of_property_read_u64(child_np, "opp-hz", &rate);

	of_node_put(child_req_np);
	of_node_put(child_np);
	of_node_put(ref_np);
out_ref:
	of_node_put(desc_np);

	return (unsigned long) rate;
}
/*
 * cpr_corner_init() - build the virtual corner table from the OPP table
 * @drv: CPR driver instance
 *
 * Called once the cpufreq side has attached (so the CPU OPP table is
 * known). Maps every CPR performance level to a fuse corner and CPU
 * frequency, records each fuse corner's maximum frequency, then derives
 * per-corner quotient adjustments and interpolated open-loop voltages.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cpr_corner_init(struct cpr_drv *drv)
{
	const struct cpr_desc *desc = drv->desc;
	const struct cpr_fuse *fuses = drv->cpr_fuses;
	int i, level, scaling = 0;
	unsigned int fnum, fc;
	const char *quot_offset;
	struct fuse_corner *fuse, *prev_fuse;
	struct corner *corner, *end;
	struct corner_data *cdata;
	const struct fuse_corner_data *fdata;
	bool apply_scaling;
	unsigned long freq_diff, freq_diff_mhz;
	unsigned long freq;
	int step_volt = regulator_get_linear_step(drv->vdd_apc);
	struct dev_pm_opp *opp;

	if (!step_volt)
		return -EINVAL;

	corner = drv->corners;
	end = &corner[drv->num_corners - 1];

	/* Scratch table: per-level fuse corner index and frequency. */
	cdata = devm_kcalloc(drv->dev, drv->num_corners,
			     sizeof(struct corner_data),
			     GFP_KERNEL);
	if (!cdata)
		return -ENOMEM;

	/*
	 * Store maximum frequency for each fuse corner based on the frequency
	 * plan
	 */
	for (level = 1; level <= drv->num_corners; level++) {
		opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
		if (IS_ERR(opp))
			return -EINVAL;
		fc = cpr_get_fuse_corner(opp);
		if (!fc) {
			dev_pm_opp_put(opp);
			return -EINVAL;
		}
		fnum = fc - 1;	/* DT levels are 1-based; index is 0-based */
		freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
		if (!freq) {
			dev_pm_opp_put(opp);
			return -EINVAL;
		}
		cdata[level - 1].fuse_corner = fnum;
		cdata[level - 1].freq = freq;

		fuse = &drv->fuse_corners[fnum];
		dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
			freq, dev_pm_opp_get_level(opp) - 1, fnum);
		if (freq > fuse->max_freq)
			fuse->max_freq = freq;
		dev_pm_opp_put(opp);
	}

	/*
	 * Get the quotient adjustment scaling factor, according to:
	 *
	 * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
	 *		/ (freq(corner_N) - freq(corner_N-1)), max_factor)
	 *
	 * QUOT(corner_N):	quotient read from fuse for fuse corner N
	 * QUOT(corner_N-1):	quotient read from fuse for fuse corner (N - 1)
	 * freq(corner_N):	max frequency in MHz supported by fuse corner N
	 * freq(corner_N-1):	max frequency in MHz supported by fuse corner
	 *			(N - 1)
	 *
	 * Then walk through the corners mapped to each fuse corner
	 * and calculate the quotient adjustment for each one using the
	 * following formula:
	 *
	 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
	 *
	 * freq_max: max frequency in MHz supported by the fuse corner
	 * freq_corner: frequency in MHz corresponding to the corner
	 * scaling: calculated from above equation
	 *
	 *
	 *     +                           +
	 *     |                         v |
	 *   q |           f c           o |           f c
	 *   u |         c                l |         c
	 *   o |       f                 t |       f
	 *   t |     c                   a |     c
	 *     | c f                     g | c f
	 *     |                         e |
	 *     +---------------            +----------------
	 *       0 1 2 3 4 5 6               0 1 2 3 4 5 6
	 *          corner                      corner
	 *
	 *    c = corner
	 *    f = fuse corner
	 *
	 */
	for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
		fnum = cdata[i].fuse_corner;
		fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
		quot_offset = fuses[fnum].quotient_offset;
		fuse = &drv->fuse_corners[fnum];
		if (fnum)
			prev_fuse = &drv->fuse_corners[fnum - 1];
		else
			prev_fuse = NULL;

		corner->fuse_corner = fuse;
		corner->freq = cdata[i].freq;
		corner->uV = fuse->uV;

		/*
		 * Recompute the scaling factor when crossing into a new
		 * fuse corner (previous corner sat at the previous fuse
		 * corner's max frequency).
		 */
		if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
			scaling = cpr_calculate_scaling(quot_offset, drv,
							fdata, corner);
			if (scaling < 0)
				return scaling;
			apply_scaling = true;
		} else if (corner->freq == fuse->max_freq) {
			/* This is a fuse corner; don't scale anything */
			apply_scaling = false;
		}

		if (apply_scaling) {
			freq_diff = fuse->max_freq - corner->freq;
			freq_diff_mhz = freq_diff / 1000000;
			corner->quot_adjust = scaling * freq_diff_mhz / 1000;

			corner->uV = cpr_interpolate(corner, step_volt, fdata);
		}

		corner->max_uV = fuse->max_uV;
		corner->min_uV = fuse->min_uV;
		corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
		corner->last_uV = corner->uV;

		/* Reduce the ceiling voltage if needed */
		if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
			corner->max_uV = corner->uV;
		else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
			corner->max_uV = max(corner->min_uV, fuse->uV);

		dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
			corner->min_uV, corner->uV, corner->max_uV,
			fuse->quot - corner->quot_adjust);
	}

	return 0;
}
  1015. static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
  1016. {
  1017. const struct cpr_desc *desc = drv->desc;
  1018. struct cpr_fuse *fuses;
  1019. int i;
  1020. fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
  1021. sizeof(struct cpr_fuse),
  1022. GFP_KERNEL);
  1023. if (!fuses)
  1024. return ERR_PTR(-ENOMEM);
  1025. for (i = 0; i < desc->num_fuse_corners; i++) {
  1026. char tbuf[32];
  1027. snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
  1028. fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
  1029. if (!fuses[i].ring_osc)
  1030. return ERR_PTR(-ENOMEM);
  1031. snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
  1032. fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
  1033. GFP_KERNEL);
  1034. if (!fuses[i].init_voltage)
  1035. return ERR_PTR(-ENOMEM);
  1036. snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
  1037. fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
  1038. if (!fuses[i].quotient)
  1039. return ERR_PTR(-ENOMEM);
  1040. snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
  1041. fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
  1042. GFP_KERNEL);
  1043. if (!fuses[i].quotient_offset)
  1044. return ERR_PTR(-ENOMEM);
  1045. }
  1046. return fuses;
  1047. }
/*
 * Unconditionally permit closed-loop operation.
 * NOTE(review): presumably a hook for SoCs that can fuse CPR off; on the
 * supported hardware the loop is always allowed — confirm if new SoCs
 * are added.
 */
static void cpr_set_loop_allowed(struct cpr_drv *drv)
{
	drv->loop_disabled = false;
}
  1052. static int cpr_init_parameters(struct cpr_drv *drv)
  1053. {
  1054. const struct cpr_desc *desc = drv->desc;
  1055. struct clk *clk;
  1056. clk = clk_get(drv->dev, "ref");
  1057. if (IS_ERR(clk))
  1058. return PTR_ERR(clk);
  1059. drv->ref_clk_khz = clk_get_rate(clk) / 1000;
  1060. clk_put(clk);
  1061. if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
  1062. desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
  1063. desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
  1064. desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
  1065. desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
  1066. desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
  1067. return -EINVAL;
  1068. dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
  1069. desc->up_threshold, desc->down_threshold);
  1070. return 0;
  1071. }
/*
 * cpr_find_initial_corner() - pick the corner matching the boot CPU clock
 * @drv: CPR driver instance
 *
 * Determines the corner corresponding to the frequency the bootloader left
 * the CPU running at, so the first performance-state transition can be
 * classified as UP or DOWN.
 *
 * Return: 0 on success, -EINVAL if no clock or no matching corner.
 */
static int cpr_find_initial_corner(struct cpr_drv *drv)
{
	unsigned long rate;
	const struct corner *end;
	struct corner *iter;
	unsigned int i = 0;

	if (!drv->cpu_clk) {
		dev_err(drv->dev, "cannot get rate from NULL clk\n");
		return -EINVAL;
	}

	end = &drv->corners[drv->num_corners - 1];
	rate = clk_get_rate(drv->cpu_clk);

	/*
	 * Some bootloaders set a CPU clock frequency that is not defined
	 * in the OPP table. When running at an unlisted frequency,
	 * cpufreq_online() will change to the OPP which has the lowest
	 * frequency, at or above the unlisted frequency.
	 * Since cpufreq_online() always "rounds up" in the case of an
	 * unlisted frequency, this function always "rounds down" in case
	 * of an unlisted frequency. That way, when cpufreq_online()
	 * triggers the first ever call to cpr_set_performance_state(),
	 * it will correctly determine the direction as UP.
	 */
	for (iter = drv->corners; iter <= end; iter++) {
		if (iter->freq > rate)
			break;
		i++;
		if (iter->freq == rate) {
			/* Exact match: this is the boot corner. */
			drv->corner = iter;
			break;
		}
		if (iter->freq < rate)
			/* Remember the highest corner below the boot rate. */
			drv->corner = iter;
	}

	if (!drv->corner) {
		dev_err(drv->dev, "boot up corner not found\n");
		return -EINVAL;
	}

	dev_dbg(drv->dev, "boot up perf state: %u\n", i);

	return 0;
}
/*
 * CPR characterization data for the QCS404 SoC: three fuse corners with
 * their reference/ceiling/floor voltages, quotient adjustments and loop
 * timing parameters. Voltages are in uV; init_voltage_step is the uV per
 * fused step, init_voltage_width the fuse field width in bits.
 */
static const struct cpr_desc qcs404_cpr_desc = {
	.num_fuse_corners = 3,
	.min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
	.step_quot = (int []){ 25, 25, 25, },
	.timer_delay_us = 5000,
	.timer_cons_up = 0,
	.timer_cons_down = 2,
	.up_threshold = 1,
	.down_threshold = 3,
	.idle_clocks = 15,
	.gcnt_us = 1,
	.vdd_apc_step_up_limit = 1,
	.vdd_apc_step_down_limit = 1,
	.cpr_fuses = {
		.init_voltage_step = 8000,
		.init_voltage_width = 6,
		.fuse_corner_data = (struct fuse_corner_data[]){
			/* fuse corner 0 */
			{
				.ref_uV = 1224000,
				.max_uV = 1224000,
				.min_uV = 1048000,
				.max_volt_scale = 0,
				.max_quot_scale = 0,
				.quot_offset = 0,
				.quot_scale = 1,
				.quot_adjust = 0,
				.quot_offset_scale = 5,
				.quot_offset_adjust = 0,
			},
			/* fuse corner 1 */
			{
				.ref_uV = 1288000,
				.max_uV = 1288000,
				.min_uV = 1048000,
				.max_volt_scale = 2000,
				.max_quot_scale = 1400,
				.quot_offset = 0,
				.quot_scale = 1,
				.quot_adjust = -20,
				.quot_offset_scale = 5,
				.quot_offset_adjust = 0,
			},
			/* fuse corner 2 */
			{
				.ref_uV = 1352000,
				.max_uV = 1384000,
				.min_uV = 1088000,
				.max_volt_scale = 2000,
				.max_quot_scale = 1400,
				.quot_offset = 0,
				.quot_scale = 1,
				.quot_adjust = 0,
				.quot_offset_scale = 5,
				.quot_offset_adjust = 0,
			},
		},
	},
};
/*
 * QCS404 ACC (Array Clamp Control) register sequences written through the
 * TCSR regmap: .settings holds num_regs_per_fuse writes per fuse corner
 * (consumed slice-by-slice in cpr_fuse_corner_init()), .config is applied
 * once at attach time.
 */
static const struct acc_desc qcs404_acc_desc = {
	.settings = (struct reg_sequence[]){
		{ 0xb120, 0x1041040 },
		{ 0xb124, 0x41 },
		{ 0xb120, 0x0 },
		{ 0xb124, 0x0 },
		{ 0xb120, 0x0 },
		{ 0xb124, 0x0 },
	},
	.config = (struct reg_sequence[]){
		{ 0xb138, 0xff },
		{ 0xb130, 0x5555 },
	},
	.num_regs_per_fuse = 2,
};
/* Match data bundling the QCS404 CPR and ACC descriptors for of_match. */
static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
	.cpr_desc = &qcs404_cpr_desc,
	.acc_desc = &qcs404_acc_desc,
};
  1191. static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
  1192. struct dev_pm_opp *opp)
  1193. {
  1194. return dev_pm_opp_get_level(opp);
  1195. }
  1196. static int cpr_power_off(struct generic_pm_domain *domain)
  1197. {
  1198. struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
  1199. return cpr_disable(drv);
  1200. }
  1201. static int cpr_power_on(struct generic_pm_domain *domain)
  1202. {
  1203. struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
  1204. return cpr_enable(drv);
  1205. }
/*
 * cpr_pd_attach_dev() - genpd attach callback, finishes CPR bring-up
 * @domain: the CPR power domain
 * @dev: the CPU device being attached
 *
 * The first CPU to attach supplies the cpu clock and OPP table that the
 * virtual-corner setup depends on; this completes corner init, programs
 * the CPR block (still disabled), applies the ACC configuration and finds
 * the boot corner. Subsequent attaches are no-ops. Runs under drv->lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
			     struct device *dev)
{
	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
	const struct acc_desc *acc_desc = drv->acc_desc;
	int ret = 0;

	mutex_lock(&drv->lock);

	dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));

	/*
	 * This driver only supports scaling voltage for a CPU cluster
	 * where all CPUs in the cluster share a single regulator.
	 * Therefore, save the struct device pointer only for the first
	 * CPU device that gets attached. There is no need to do any
	 * additional initialization when further CPUs get attached.
	 */
	if (drv->attached_cpu_dev)
		goto unlock;

	/*
	 * cpr_scale_voltage() requires the direction (if we are changing
	 * to a higher or lower OPP). The first time
	 * cpr_set_performance_state() is called, there is no previous
	 * performance state defined. Therefore, we call
	 * cpr_find_initial_corner() that gets the CPU clock frequency
	 * set by the bootloader, so that we can determine the direction
	 * the first time cpr_set_performance_state() is called.
	 */
	drv->cpu_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(drv->cpu_clk)) {
		ret = PTR_ERR(drv->cpu_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
		goto unlock;
	}
	drv->attached_cpu_dev = dev;

	dev_dbg(drv->dev, "using cpu clk from: %s\n",
		dev_name(drv->attached_cpu_dev));

	/*
	 * Everything related to (virtual) corners has to be initialized
	 * here, when attaching to the power domain, since we need to know
	 * the maximum frequency for each fuse corner, and this is only
	 * available after the cpufreq driver has attached to us.
	 * The reason for this is that we need to know the highest
	 * frequency associated with each fuse corner.
	 */
	ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
	if (ret < 0) {
		dev_err(drv->dev, "could not get OPP count\n");
		goto unlock;
	}
	drv->num_corners = ret;

	if (drv->num_corners < 2) {
		dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
		ret = -EINVAL;
		goto unlock;
	}

	drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
				    sizeof(*drv->corners),
				    GFP_KERNEL);
	if (!drv->corners) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = cpr_corner_init(drv);
	if (ret)
		goto unlock;

	cpr_set_loop_allowed(drv);

	ret = cpr_init_parameters(drv);
	if (ret)
		goto unlock;

	/* Configure CPR HW but keep it disabled */
	ret = cpr_config(drv);
	if (ret)
		goto unlock;

	ret = cpr_find_initial_corner(drv);
	if (ret)
		goto unlock;

	/* One-time ACC configuration through the TCSR syscon. */
	if (acc_desc->config)
		regmap_multi_reg_write(drv->tcsr, acc_desc->config,
				       acc_desc->num_regs_per_fuse);

	/* Enable ACC if required */
	if (acc_desc->enable_mask)
		regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
				   acc_desc->enable_mask,
				   acc_desc->enable_mask);

	dev_info(drv->dev, "driver initialized with %u OPPs\n",
		 drv->num_corners);

unlock:
	mutex_unlock(&drv->lock);

	return ret;
}
/*
 * cpr_debug_info_show() - debugfs dump of the current corner and the raw
 * CPR controller state (gcnt/target, control, IRQ status and the decoded
 * RESULT_0 fields).
 */
static int cpr_debug_info_show(struct seq_file *s, void *unused)
{
	u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
	u32 step_dn, step_up, error, error_lt0, busy;
	struct cpr_drv *drv = s->private;
	struct fuse_corner *fuse_corner;
	struct corner *corner;

	corner = drv->corner;
	fuse_corner = corner->fuse_corner;

	seq_printf(s, "corner, current_volt = %d uV\n",
		   corner->last_uV);

	ro_sel = fuse_corner->ring_osc_idx;
	gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
	seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);

	ctl = cpr_read(drv, REG_RBCPR_CTL);
	seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);

	irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
	seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);

	reg = cpr_read(drv, REG_RBCPR_RESULT_0);
	seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);

	/* Decode the individual RESULT_0 bit fields for readability. */
	step_dn = reg & 0x01;
	step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
	seq_printf(s, "  [step_dn = %u", step_dn);
	seq_printf(s, ", step_up = %u", step_up);

	error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
				& RBCPR_RESULT0_ERROR_STEPS_MASK;
	seq_printf(s, ", error_steps = %u", error_steps);

	error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
	seq_printf(s, ", error = %u", error);

	error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
	seq_printf(s, ", error_lt_0 = %u", error_lt0);

	busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
	seq_printf(s, ", busy = %u]\n", busy);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);

/* Create qcom_cpr/debug_info in debugfs; removed in cpr_remove(). */
static void cpr_debugfs_init(struct cpr_drv *drv)
{
	drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);

	debugfs_create_file("debug_info", 0444, drv->debugfs,
			    drv, &cpr_debug_info_fops);
}
  1338. static int cpr_probe(struct platform_device *pdev)
  1339. {
  1340. struct device *dev = &pdev->dev;
  1341. struct cpr_drv *drv;
  1342. int irq, ret;
  1343. const struct cpr_acc_desc *data;
  1344. struct device_node *np;
  1345. u32 cpr_rev = FUSE_REVISION_UNKNOWN;
  1346. data = of_device_get_match_data(dev);
  1347. if (!data || !data->cpr_desc || !data->acc_desc)
  1348. return -EINVAL;
  1349. drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
  1350. if (!drv)
  1351. return -ENOMEM;
  1352. drv->dev = dev;
  1353. drv->desc = data->cpr_desc;
  1354. drv->acc_desc = data->acc_desc;
  1355. drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
  1356. sizeof(*drv->fuse_corners),
  1357. GFP_KERNEL);
  1358. if (!drv->fuse_corners)
  1359. return -ENOMEM;
  1360. np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
  1361. if (!np)
  1362. return -ENODEV;
  1363. drv->tcsr = syscon_node_to_regmap(np);
  1364. of_node_put(np);
  1365. if (IS_ERR(drv->tcsr))
  1366. return PTR_ERR(drv->tcsr);
  1367. drv->base = devm_platform_ioremap_resource(pdev, 0);
  1368. if (IS_ERR(drv->base))
  1369. return PTR_ERR(drv->base);
  1370. irq = platform_get_irq(pdev, 0);
  1371. if (irq < 0)
  1372. return -EINVAL;
  1373. drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
  1374. if (IS_ERR(drv->vdd_apc))
  1375. return PTR_ERR(drv->vdd_apc);
  1376. /*
  1377. * Initialize fuse corners, since it simply depends
  1378. * on data in efuses.
  1379. * Everything related to (virtual) corners has to be
  1380. * initialized after attaching to the power domain,
  1381. * since it depends on the CPU's OPP table.
  1382. */
  1383. ret = nvmem_cell_read_variable_le_u32(dev, "cpr_fuse_revision", &cpr_rev);
  1384. if (ret)
  1385. return ret;
  1386. drv->cpr_fuses = cpr_get_fuses(drv);
  1387. if (IS_ERR(drv->cpr_fuses))
  1388. return PTR_ERR(drv->cpr_fuses);
  1389. ret = cpr_populate_ring_osc_idx(drv);
  1390. if (ret)
  1391. return ret;
  1392. ret = cpr_fuse_corner_init(drv);
  1393. if (ret)
  1394. return ret;
  1395. mutex_init(&drv->lock);
  1396. ret = devm_request_threaded_irq(dev, irq, NULL,
  1397. cpr_irq_handler,
  1398. IRQF_ONESHOT | IRQF_TRIGGER_RISING,
  1399. "cpr", drv);
  1400. if (ret)
  1401. return ret;
  1402. drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
  1403. GFP_KERNEL);
  1404. if (!drv->pd.name)
  1405. return -EINVAL;
  1406. drv->pd.power_off = cpr_power_off;
  1407. drv->pd.power_on = cpr_power_on;
  1408. drv->pd.set_performance_state = cpr_set_performance_state;
  1409. drv->pd.opp_to_performance_state = cpr_get_performance_state;
  1410. drv->pd.attach_dev = cpr_pd_attach_dev;
  1411. ret = pm_genpd_init(&drv->pd, NULL, true);
  1412. if (ret)
  1413. return ret;
  1414. ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
  1415. if (ret)
  1416. goto err_remove_genpd;
  1417. platform_set_drvdata(pdev, drv);
  1418. cpr_debugfs_init(drv);
  1419. return 0;
  1420. err_remove_genpd:
  1421. pm_genpd_remove(&drv->pd);
  1422. return ret;
  1423. }
/*
 * cpr_remove() - platform remove: quiesce the CPR loop (if it was ever
 * allowed), then tear down the genpd provider, the domain and debugfs.
 * Order matters: the hardware is silenced before the domain goes away.
 */
static int cpr_remove(struct platform_device *pdev)
{
	struct cpr_drv *drv = platform_get_drvdata(pdev);

	if (cpr_is_allowed(drv)) {
		cpr_ctl_disable(drv);
		cpr_irq_set(drv, 0);
	}

	of_genpd_del_provider(pdev->dev.of_node);
	pm_genpd_remove(&drv->pd);

	debugfs_remove_recursive(drv->debugfs);

	return 0;
}
/* Devicetree match table; .data selects the per-SoC CPR/ACC descriptors. */
static const struct of_device_id cpr_match_table[] = {
	{ .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
	{ }
};
MODULE_DEVICE_TABLE(of, cpr_match_table);
/* Platform driver glue and module boilerplate. */
static struct platform_driver cpr_driver = {
	.probe		= cpr_probe,
	.remove		= cpr_remove,
	.driver		= {
		.name	= "qcom-cpr",
		.of_match_table = cpr_match_table,
	},
};
module_platform_driver(cpr_driver);

MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
MODULE_LICENSE("GPL v2");