bwmon.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "qcom-bwmon: " fmt
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/init.h>
  10. #include <linux/io.h>
  11. #include <linux/delay.h>
  12. #include <linux/bitops.h>
  13. #include <linux/err.h>
  14. #include <linux/errno.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/of.h>
  18. #include <linux/of_device.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/log2.h>
  21. #include <linux/sizes.h>
  22. #include <soc/qcom/dcvs.h>
  23. #include <trace/hooks/sched.h>
  24. #include "bwmon.h"
  25. #include "trace-dcvs.h"
  26. static LIST_HEAD(hwmon_list);
  27. static DEFINE_SPINLOCK(list_lock);
  28. static DEFINE_SPINLOCK(sample_irq_lock);
  29. static DEFINE_SPINLOCK(mon_irq_lock);
  30. static DEFINE_MUTEX(bwmon_lock);
  31. static struct workqueue_struct *bwmon_wq;
  32. static u32 get_dst_from_map(struct bw_hwmon *hw, u32 src_vote);
  33. struct qcom_bwmon_attr {
  34. struct attribute attr;
  35. ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
  36. char *buf);
  37. ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
  38. const char *buf, size_t count);
  39. };
  40. #define to_bwmon_attr(_attr) \
  41. container_of(_attr, struct qcom_bwmon_attr, attr)
  42. #define to_hwmon_node(k) container_of(k, struct hwmon_node, kobj)
  43. #define to_bwmon(ptr) container_of(ptr, struct bwmon, hw)
  44. #define BWMON_ATTR_RW(_name) \
  45. struct qcom_bwmon_attr _name = \
  46. __ATTR(_name, 0644, show_##_name, store_##_name) \
  47. #define BWMON_ATTR_RO(_name) \
  48. struct qcom_bwmon_attr _name = \
  49. __ATTR(_name, 0444, show_##_name, NULL) \
  50. #define show_attr(name) \
  51. static ssize_t show_##name(struct kobject *kobj, \
  52. struct attribute *attr, char *buf) \
  53. { \
  54. struct hwmon_node *node = to_hwmon_node(kobj); \
  55. return scnprintf(buf, PAGE_SIZE, "%u\n", node->name); \
  56. } \
  57. #define store_attr(name, _min, _max) \
  58. static ssize_t store_##name(struct kobject *kobj, \
  59. struct attribute *attr, const char *buf, \
  60. size_t count) \
  61. { \
  62. int ret; \
  63. unsigned int val; \
  64. struct hwmon_node *node = to_hwmon_node(kobj); \
  65. ret = kstrtouint(buf, 10, &val); \
  66. if (ret < 0) \
  67. return ret; \
  68. val = max(val, _min); \
  69. val = min(val, _max); \
  70. node->name = val; \
  71. return count; \
  72. } \
  73. #define show_list_attr(name, n) \
  74. static ssize_t show_##name(struct kobject *kobj, \
  75. struct attribute *attr, char *buf) \
  76. { \
  77. struct hwmon_node *node = to_hwmon_node(kobj); \
  78. unsigned int i, cnt = 0; \
  79. \
  80. for (i = 0; i < n && node->name[i]; i++) \
  81. cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u ", \
  82. node->name[i]); \
  83. cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "\n"); \
  84. return cnt; \
  85. } \
  86. #define store_list_attr(name, n, _min, _max) \
  87. static ssize_t store_##name(struct kobject *kobj, \
  88. struct attribute *attr, const char *buf, \
  89. size_t count) \
  90. { \
  91. struct hwmon_node *node = to_hwmon_node(kobj); \
  92. int ret, numvals; \
  93. unsigned int i = 0, val; \
  94. char **strlist; \
  95. \
  96. strlist = argv_split(GFP_KERNEL, buf, &numvals); \
  97. if (!strlist) \
  98. return -ENOMEM; \
  99. numvals = min(numvals, n - 1); \
  100. for (i = 0; i < numvals; i++) { \
  101. ret = kstrtouint(strlist[i], 10, &val); \
  102. if (ret < 0) \
  103. goto out; \
  104. val = max(val, _min); \
  105. val = min(val, _max); \
  106. node->name[i] = val; \
  107. } \
  108. ret = count; \
  109. out: \
  110. argv_free(strlist); \
  111. node->name[i] = 0; \
  112. return ret; \
  113. } \
  114. static ssize_t store_min_freq(struct kobject *kobj,
  115. struct attribute *attr, const char *buf,
  116. size_t count)
  117. {
  118. int ret;
  119. unsigned int freq;
  120. struct hwmon_node *node = to_hwmon_node(kobj);
  121. ret = kstrtouint(buf, 10, &freq);
  122. if (ret < 0)
  123. return ret;
  124. freq = max(freq, node->hw_min_freq);
  125. freq = min(freq, node->max_freq);
  126. node->min_freq = freq;
  127. return count;
  128. }
  129. static ssize_t store_max_freq(struct kobject *kobj,
  130. struct attribute *attr, const char *buf,
  131. size_t count)
  132. {
  133. int ret;
  134. unsigned int freq;
  135. struct hwmon_node *node = to_hwmon_node(kobj);
  136. ret = kstrtouint(buf, 10, &freq);
  137. if (ret < 0)
  138. return ret;
  139. freq = max(freq, node->min_freq);
  140. freq = min(freq, node->hw_max_freq);
  141. node->max_freq = freq;
  142. return count;
  143. }
  144. static ssize_t store_throttle_adj(struct kobject *kobj,
  145. struct attribute *attr, const char *buf,
  146. size_t count)
  147. {
  148. struct hwmon_node *node = to_hwmon_node(kobj);
  149. int ret;
  150. unsigned int val;
  151. if (!node->hw->set_throttle_adj)
  152. return -EPERM;
  153. ret = kstrtouint(buf, 10, &val);
  154. if (ret < 0)
  155. return ret;
  156. ret = node->hw->set_throttle_adj(node->hw, val);
  157. if (!ret)
  158. return count;
  159. else
  160. return ret;
  161. }
  162. static ssize_t show_throttle_adj(struct kobject *kobj,
  163. struct attribute *attr, char *buf)
  164. {
  165. struct hwmon_node *node = to_hwmon_node(kobj);
  166. unsigned int val;
  167. if (!node->hw->get_throttle_adj)
  168. val = 0;
  169. else
  170. val = node->hw->get_throttle_adj(node->hw);
  171. return scnprintf(buf, PAGE_SIZE, "%u\n", val);
  172. }
  173. #define SAMPLE_MIN_MS 1U
  174. #define SAMPLE_MAX_MS 50U
  175. static ssize_t store_sample_ms(struct kobject *kobj,
  176. struct attribute *attr, const char *buf,
  177. size_t count)
  178. {
  179. struct hwmon_node *node = to_hwmon_node(kobj);
  180. int ret;
  181. unsigned int val;
  182. ret = kstrtouint(buf, 10, &val);
  183. if (ret)
  184. return ret;
  185. val = max(val, SAMPLE_MIN_MS);
  186. val = min(val, SAMPLE_MAX_MS);
  187. if (val > node->window_ms)
  188. return -EINVAL;
  189. node->sample_ms = val;
  190. return count;
  191. }
  192. static ssize_t show_cur_freq(struct kobject *kobj,
  193. struct attribute *attr, char *buf)
  194. {
  195. struct hwmon_node *node = to_hwmon_node(kobj);
  196. return scnprintf(buf, PAGE_SIZE, "%u\n", node->cur_freqs[0].ib);
  197. }
  198. static ssize_t store_second_vote_limit(struct kobject *kobj,
  199. struct attribute *attr, const char *buf,
  200. size_t count)
  201. {
  202. struct hwmon_node *node = to_hwmon_node(kobj);
  203. struct bw_hwmon *hw = node->hw;
  204. int ret;
  205. unsigned int val;
  206. if (!hw->second_vote_supported)
  207. return -ENODEV;
  208. ret = kstrtouint(buf, 10, &val);
  209. if (ret < 0)
  210. return ret;
  211. if (val == hw->second_vote_limit)
  212. return count;
  213. mutex_lock(&node->update_lock);
  214. if (val >= node->cur_freqs[1].ib)
  215. goto unlock_out;
  216. node->cur_freqs[1].ib = val;
  217. ret = qcom_dcvs_update_votes(dev_name(hw->dev), node->cur_freqs, 0x3,
  218. hw->dcvs_path);
  219. if (ret < 0)
  220. dev_err(hw->dev, "second vote update failed: %d\n", ret);
  221. unlock_out:
  222. hw->second_vote_limit = val;
  223. mutex_unlock(&node->update_lock);
  224. return count;
  225. }
  226. static ssize_t show_second_vote_limit(struct kobject *kobj,
  227. struct attribute *attr, char *buf)
  228. {
  229. struct hwmon_node *node = to_hwmon_node(kobj);
  230. struct bw_hwmon *hw = node->hw;
  231. return scnprintf(buf, PAGE_SIZE, "%u\n", hw->second_vote_limit);
  232. }
  233. show_attr(min_freq);
  234. static BWMON_ATTR_RW(min_freq);
  235. show_attr(max_freq);
  236. static BWMON_ATTR_RW(max_freq);
  237. static BWMON_ATTR_RW(throttle_adj);
  238. show_attr(sample_ms);
  239. static BWMON_ATTR_RW(sample_ms);
  240. static BWMON_ATTR_RO(cur_freq);
  241. static BWMON_ATTR_RW(second_vote_limit);
  242. show_attr(window_ms);
  243. store_attr(window_ms, 8U, 1000U);
  244. static BWMON_ATTR_RW(window_ms);
  245. show_attr(guard_band_mbps);
  246. store_attr(guard_band_mbps, 0U, 2000U);
  247. static BWMON_ATTR_RW(guard_band_mbps);
  248. show_attr(decay_rate);
  249. store_attr(decay_rate, 0U, 100U);
  250. static BWMON_ATTR_RW(decay_rate);
  251. show_attr(io_percent);
  252. store_attr(io_percent, 1U, 400U);
  253. static BWMON_ATTR_RW(io_percent);
  254. show_attr(bw_step);
  255. store_attr(bw_step, 50U, 1000U);
  256. static BWMON_ATTR_RW(bw_step);
  257. show_attr(up_scale);
  258. store_attr(up_scale, 0U, 500U);
  259. static BWMON_ATTR_RW(up_scale);
  260. show_attr(up_thres);
  261. store_attr(up_thres, 1U, 100U);
  262. static BWMON_ATTR_RW(up_thres);
  263. show_attr(down_thres);
  264. store_attr(down_thres, 0U, 90U);
  265. static BWMON_ATTR_RW(down_thres);
  266. show_attr(down_count);
  267. store_attr(down_count, 0U, 90U);
  268. static BWMON_ATTR_RW(down_count);
  269. show_attr(hist_memory);
  270. store_attr(hist_memory, 0U, 90U);
  271. static BWMON_ATTR_RW(hist_memory);
  272. show_attr(hyst_trigger_count);
  273. store_attr(hyst_trigger_count, 0U, 90U);
  274. static BWMON_ATTR_RW(hyst_trigger_count);
  275. show_attr(hyst_length);
  276. store_attr(hyst_length, 0U, 90U);
  277. static BWMON_ATTR_RW(hyst_length);
  278. show_attr(idle_length);
  279. store_attr(idle_length, 0U, 90U);
  280. static BWMON_ATTR_RW(idle_length);
  281. show_attr(idle_mbps);
  282. store_attr(idle_mbps, 0U, 2000U);
  283. static BWMON_ATTR_RW(idle_mbps);
  284. show_attr(ab_scale);
  285. store_attr(ab_scale, 0U, 100U);
  286. static BWMON_ATTR_RW(ab_scale);
  287. show_list_attr(mbps_zones, NUM_MBPS_ZONES);
  288. store_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);
  289. static BWMON_ATTR_RW(mbps_zones);
  290. static struct attribute *bwmon_attrs[] = {
  291. &min_freq.attr,
  292. &max_freq.attr,
  293. &cur_freq.attr,
  294. &window_ms.attr,
  295. &guard_band_mbps.attr,
  296. &decay_rate.attr,
  297. &io_percent.attr,
  298. &bw_step.attr,
  299. &sample_ms.attr,
  300. &up_scale.attr,
  301. &up_thres.attr,
  302. &down_thres.attr,
  303. &down_count.attr,
  304. &hist_memory.attr,
  305. &hyst_trigger_count.attr,
  306. &hyst_length.attr,
  307. &idle_length.attr,
  308. &idle_mbps.attr,
  309. &ab_scale.attr,
  310. &mbps_zones.attr,
  311. &throttle_adj.attr,
  312. &second_vote_limit.attr,
  313. NULL,
  314. };
  315. ATTRIBUTE_GROUPS(bwmon);
  316. static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
  317. char *buf)
  318. {
  319. struct qcom_bwmon_attr *bwmon_attr = to_bwmon_attr(attr);
  320. ssize_t ret = -EIO;
  321. if (bwmon_attr->show)
  322. ret = bwmon_attr->show(kobj, attr, buf);
  323. return ret;
  324. }
  325. static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
  326. const char *buf, size_t count)
  327. {
  328. struct qcom_bwmon_attr *bwmon_attr = to_bwmon_attr(attr);
  329. ssize_t ret = -EIO;
  330. if (bwmon_attr->store)
  331. ret = bwmon_attr->store(kobj, attr, buf, count);
  332. return ret;
  333. }
  334. static const struct sysfs_ops bwmon_sysfs_ops = {
  335. .show = attr_show,
  336. .store = attr_store,
  337. };
  338. static struct kobj_type bwmon_ktype = {
  339. .sysfs_ops = &bwmon_sysfs_ops,
  340. .default_groups = bwmon_groups,
  341. };
  342. /* Returns MBps of read/writes for the sampling window. */
  343. static unsigned long bytes_to_mbps(unsigned long long bytes, unsigned int us)
  344. {
  345. bytes *= USEC_PER_SEC;
  346. do_div(bytes, us);
  347. bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
  348. return bytes;
  349. }
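/* Convert an MBps rate into the number of bytes expected over an ms-long window. */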
  350. static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms)
  351. {
  352. mbps *= ms;
  353. mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
  354. mbps *= SZ_1M;
  355. return mbps;
  356. }
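/*
 * End a software-timed micro sample: convert the bytes counted since the
 * previous timestamp into MBps, track the window's peak, and decide whether
 * the governor needs an UP_WAKE (sudden load increase) or a DOWN_WAKE
 * (down_count consecutive low samples).
 */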
  357. static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon)
  358. {
  359. struct hwmon_node *node = hwmon->node;
  360. ktime_t ts;
  361. unsigned long bytes, mbps;
  362. unsigned int us;
  363. int wake = 0;
  364. ts = ktime_get();
  365. us = ktime_to_us(ktime_sub(ts, node->prev_ts));
  366. bytes = hwmon->get_bytes_and_clear(hwmon);
  367. bytes += node->bytes;
  368. node->bytes = 0;
  369. mbps = bytes_to_mbps(bytes, us);
  370. node->max_mbps = max(node->max_mbps, mbps);
  371. /*
  372. * If the measured bandwidth in a micro sample is greater than the
  373. * wake up threshold, it indicates an increase in load that's non
  374. * trivial. So, have the governor ignore historical idle time or low
  375. * bandwidth usage and do the bandwidth calculation based on just
  376. * this micro sample.
  377. */
  378. if (mbps > node->hw->up_wake_mbps) {
  379. wake = UP_WAKE;
  380. } else if (mbps < node->hw->down_wake_mbps) {
  381. if (node->down_cnt)
  382. node->down_cnt--;
  383. if (node->down_cnt <= 0)
  384. wake = DOWN_WAKE;
  385. }
  386. node->prev_ts = ts;
  387. node->wake = wake;
  388. node->sampled = true;
  389. trace_bw_hwmon_meas(dev_name(hwmon->dev),
  390. mbps,
  391. us,
  392. wake);
  393. return wake;
  394. }
  395. static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon)
  396. {
  397. struct hwmon_node *node = hwmon->node;
  398. unsigned long bytes, mbps;
  399. int wake = 0;
  400. /*
  401. * If this read is in response to an IRQ, the HW monitor should
  402. * return the measurement in the micro sample that triggered the IRQ.
  403. * Otherwise, it should return the maximum measured value in any
  404. * micro sample since the last time we called get_bytes_and_clear()
  405. */
  406. bytes = hwmon->get_bytes_and_clear(hwmon);
  407. mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC);
  408. node->max_mbps = mbps;
  409. if (mbps > node->hw->up_wake_mbps)
  410. wake = UP_WAKE;
  411. else if (mbps < node->hw->down_wake_mbps)
  412. wake = DOWN_WAKE;
  413. node->wake = wake;
  414. node->sampled = true;
  415. trace_bw_hwmon_meas(dev_name(hwmon->dev),
  416. mbps,
  417. node->sample_ms * USEC_PER_MSEC,
  418. wake);
  419. return 1;
  420. }
  421. static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon)
  422. {
  423. if (hwmon->set_hw_events)
  424. return __bw_hwmon_hw_sample_end(hwmon);
  425. else
  426. return __bw_hwmon_sw_sample_end(hwmon);
  427. }
  428. static int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
  429. {
  430. unsigned long flags;
  431. int wake;
  432. spin_lock_irqsave(&sample_irq_lock, flags);
  433. wake = __bw_hwmon_sample_end(hwmon);
  434. spin_unlock_irqrestore(&sample_irq_lock, flags);
  435. return wake;
  436. }
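/*
 * Return the smallest configured mbps zone that can hold the measurement,
 * or the MBps equivalent of max_freq if it exceeds every zone.
 */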
  437. static unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
  438. {
  439. int i;
  440. for (i = 0; i < NUM_MBPS_ZONES && node->mbps_zones[i]; i++)
  441. if (node->mbps_zones[i] >= mbps)
  442. return node->mbps_zones[i];
  443. return KHZ_TO_MBPS(node->max_freq, node->hw->dcvs_width);
  444. }
  445. #define MIN_MBPS 500UL
  446. #define HIST_PEAK_TOL 75
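/*
 * Core governor step: fold the latest measurement into the history and
 * hysteresis state, pick the next bandwidth request, reprogram the up/down
 * wake thresholds for the next sample window, and fill freq_mbps with the
 * resulting ab/ib votes (in MBps). Returns the requested MBps.
 */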
  447. static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
  448. struct dcvs_freq *freq_mbps)
  449. {
  450. unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
  451. unsigned long meas_mbps_zone;
  452. unsigned long hist_lo_tol, hyst_lo_tol;
  453. struct bw_hwmon *hw = node->hw;
  454. unsigned int new_bw, io_percent = node->io_percent;
  455. ktime_t ts;
  456. unsigned int ms = 0;
  457. spin_lock_irqsave(&sample_irq_lock, flags);
  458. if (!hw->set_hw_events) {
  459. ts = ktime_get();
  460. ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
  461. }
  462. if (!node->sampled || ms >= node->sample_ms)
  463. __bw_hwmon_sample_end(node->hw);
  464. node->sampled = false;
  465. req_mbps = meas_mbps = node->max_mbps;
  466. node->max_mbps = 0;
  467. hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
  468. /* Remember historic peak in the past hist_mem decision windows. */
  469. if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
  470. /* If new max or no history */
  471. node->hist_max_mbps = meas_mbps;
  472. node->hist_mem = node->hist_memory;
  473. } else if (meas_mbps >= hist_lo_tol) {
  474. /*
  475. * If subsequent peaks come close (within tolerance) to but
  476. * less than the historic peak, then reset the history start,
  477. * but not the peak value.
  478. */
  479. node->hist_mem = node->hist_memory;
  480. } else {
  481. /* Count down history expiration. */
  482. if (node->hist_mem)
  483. node->hist_mem--;
  484. }
  485. /*
  486. * The AB value that corresponds to the lowest mbps zone greater than
  487. * or equal to the "frequency" the current measurement will pick.
  488. * This upper limit is useful for balancing out any prediction
  489. * mechanisms to be power friendly.
  490. */
  491. meas_mbps_zone = (meas_mbps * 100) / io_percent;
  492. meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
  493. meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
  494. meas_mbps_zone = max(meas_mbps, meas_mbps_zone);
  495. /*
  496. * If this is a wake up due to BW increase, vote much higher BW than
  497. * what we measure to stay ahead of increasing traffic and then set
  498. * it up to vote for measured BW if we see down_count short sample
  499. * windows of low traffic.
  500. */
  501. if (node->wake == UP_WAKE) {
  502. req_mbps += ((meas_mbps - node->prev_req)
  503. * node->up_scale) / 100;
  504. /*
  505. * However if the measured load is less than the historic
  506. * peak, but the over request is higher than the historic
  507. * peak, then we could limit the over requesting to the
  508. * historic peak.
  509. */
  510. if (req_mbps > node->hist_max_mbps
  511. && meas_mbps < node->hist_max_mbps)
  512. req_mbps = node->hist_max_mbps;
  513. req_mbps = min(req_mbps, meas_mbps_zone);
  514. }
  515. hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
  516. if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
  517. hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
  518. node->hyst_peak = 0;
  519. node->hyst_trig_win = node->hyst_length;
  520. node->hyst_mbps = meas_mbps;
  521. if (node->hyst_en)
  522. node->hyst_en = node->hyst_length;
  523. }
  524. /*
  525. * Check node->max_mbps to avoid double counting peaks that cause
  526. * early termination of a window.
  527. */
  528. if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
  529. && !node->max_mbps) {
  530. node->hyst_peak++;
  531. if (node->hyst_peak >= node->hyst_trigger_count) {
  532. node->hyst_peak = 0;
  533. node->hyst_en = node->hyst_length;
  534. }
  535. }
  536. if (node->hyst_trig_win)
  537. node->hyst_trig_win--;
  538. if (node->hyst_en)
  539. node->hyst_en--;
  540. if (!node->hyst_trig_win && !node->hyst_en) {
  541. node->hyst_peak = 0;
  542. node->hyst_mbps = 0;
  543. }
  544. if (node->hyst_en) {
  545. if (meas_mbps > node->idle_mbps) {
  546. req_mbps = max(req_mbps, node->hyst_mbps);
  547. node->idle_en = node->idle_length;
  548. } else if (node->idle_en) {
  549. req_mbps = max(req_mbps, node->hyst_mbps);
  550. node->idle_en--;
  551. }
  552. }
  553. /* Stretch the short sample window size, if the traffic is too low */
  554. if (meas_mbps < MIN_MBPS) {
  555. hw->up_wake_mbps = (max(MIN_MBPS, req_mbps)
  556. * (100 + node->up_thres)) / 100;
  557. hw->down_wake_mbps = 0;
  558. thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
  559. node->sample_ms);
  560. } else {
  561. /*
  562. * Up wake vs down wake are intentionally a percentage of
  563. * req_mbps vs meas_mbps to make sure the over requesting
  564. * phase is handled properly. We only want to wake up and
  565. * reduce the vote based on the measured mbps being less than
  566. * the previous measurement that caused the "over request".
  567. */
  568. hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
  569. hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
  570. thres = mbps_to_bytes(meas_mbps, node->sample_ms);
  571. }
  572. if (hw->set_hw_events) {
  573. hw->down_cnt = node->down_count;
  574. hw->set_hw_events(hw, node->sample_ms);
  575. } else {
  576. node->down_cnt = node->down_count;
  577. node->bytes = hw->set_thres(hw, thres);
  578. }
  579. node->wake = 0;
  580. node->prev_req = req_mbps;
  581. spin_unlock_irqrestore(&sample_irq_lock, flags);
  582. adj_mbps = req_mbps + node->guard_band_mbps;
  583. if (adj_mbps > node->prev_ab) {
  584. new_bw = adj_mbps;
  585. } else {
  586. new_bw = adj_mbps * node->decay_rate
  587. + node->prev_ab * (100 - node->decay_rate);
  588. new_bw /= 100;
  589. }
  590. node->prev_ab = new_bw;
  591. freq_mbps->ib = (new_bw * 100) / io_percent;
  592. if (node->ab_scale < 100)
  593. new_bw = mult_frac(new_bw, node->ab_scale, 100);
  594. freq_mbps->ab = roundup(new_bw, node->bw_step);
  595. trace_bw_hwmon_update(dev_name(node->hw->dev),
  596. freq_mbps->ab,
  597. freq_mbps->ib,
  598. hw->up_wake_mbps,
  599. hw->down_wake_mbps);
  600. trace_bw_hwmon_debug(dev_name(node->hw->dev),
  601. req_mbps,
  602. meas_mbps_zone,
  603. node->hist_max_mbps,
  604. node->hist_mem,
  605. node->hyst_mbps,
  606. node->hyst_en);
  607. return req_mbps;
  608. }
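/*
 * Map a primary-path vote to the secondary path using the DT-provided
 * table: pick the first entry whose src_freq covers src_vote (clamping to
 * the last entry) and return its dst_freq, or 0 if there is no table.
 */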
  609. static u32 get_dst_from_map(struct bw_hwmon *hw, u32 src_vote)
  610. {
  611. struct bwmon_second_map *map = hw->second_map;
  612. u32 dst_vote = 0;
  613. if (!map)
  614. goto out;
  615. while (map->src_freq && map->src_freq < src_vote)
  616. map++;
  617. if (!map->src_freq)
  618. map--;
  619. dst_vote = map->dst_freq;
  620. out:
  621. return dst_vote;
  622. }
  623. /*
  624. * Governor function that computes new target frequency
  625. * based on bw measurement (mbps) and updates cur_freq (khz).
  626. * Returns true if cur_freq was changed
  627. * Note: must hold node->update_lock before calling
  628. */
  629. static bool bwmon_update_cur_freq(struct hwmon_node *node)
  630. {
  631. struct bw_hwmon *hw = node->hw;
  632. struct dcvs_freq new_freq;
  633. u32 primary_mbps;
  634. get_bw_and_set_irq(node, &new_freq);
  635. /* first convert freq from mbps to khz */
  636. new_freq.ab = MBPS_TO_KHZ(new_freq.ab, hw->dcvs_width);
  637. new_freq.ib = MBPS_TO_KHZ(new_freq.ib, hw->dcvs_width);
  638. new_freq.ib = max(new_freq.ib, node->min_freq);
  639. new_freq.ib = min(new_freq.ib, node->max_freq);
  640. primary_mbps = KHZ_TO_MBPS(new_freq.ib, hw->dcvs_width);
  641. if (new_freq.ib != node->cur_freqs[0].ib ||
  642. new_freq.ab != node->cur_freqs[0].ab) {
  643. node->cur_freqs[0].ib = new_freq.ib;
  644. node->cur_freqs[0].ab = new_freq.ab;
  645. if (hw->second_vote_supported) {
  646. if (hw->second_map)
  647. node->cur_freqs[1].ib = get_dst_from_map(hw,
  648. new_freq.ib);
  649. else if (hw->second_dcvs_width)
  650. node->cur_freqs[1].ib = MBPS_TO_KHZ(primary_mbps,
  651. hw->second_dcvs_width);
  652. else
  653. node->cur_freqs[1].ib = 0;
  654. node->cur_freqs[1].ib = min(node->cur_freqs[1].ib,
  655. hw->second_vote_limit);
  656. }
  657. return true;
  658. }
  659. return false;
  660. }
  661. static const u64 HALF_TICK_NS = (NSEC_PER_SEC / HZ) >> 1;
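/*
 * Jiffies-update callback: for every active monitor, queue its update work
 * once window_ms has elapsed (to within half a tick) since its last update.
 */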
  662. static void bwmon_jiffies_update_cb(void *unused, void *extra)
  663. {
  664. struct bw_hwmon *hw;
  665. struct hwmon_node *node;
  666. unsigned long flags;
  667. ktime_t now = ktime_get();
  668. s64 delta_ns;
  669. spin_lock_irqsave(&list_lock, flags);
  670. list_for_each_entry(node, &hwmon_list, list) {
  671. hw = node->hw;
  672. if (!hw->is_active)
  673. continue;
  674. delta_ns = now - hw->last_update_ts + HALF_TICK_NS;
  675. if (delta_ns > ms_to_ktime(hw->node->window_ms)) {
  676. queue_work(bwmon_wq, &hw->work);
  677. hw->last_update_ts = now;
  678. }
  679. }
  680. spin_unlock_irqrestore(&list_lock, flags);
  681. }
  682. static void bwmon_monitor_work(struct work_struct *work)
  683. {
  684. int err = 0;
  685. struct bw_hwmon *hw = container_of(work, struct bw_hwmon, work);
  686. struct hwmon_node *node = hw->node;
  687. /* governor update and commit */
  688. mutex_lock(&node->update_lock);
  689. if (bwmon_update_cur_freq(node))
  690. err = qcom_dcvs_update_votes(dev_name(hw->dev),
  691. node->cur_freqs,
  692. 1 + (hw->second_vote_supported << 1),
  693. hw->dcvs_path);
  694. if (err < 0)
  695. dev_err(hw->dev, "bwmon monitor update failed: %d\n", err);
  696. mutex_unlock(&node->update_lock);
  697. }
  698. static inline void bwmon_monitor_start(struct bw_hwmon *hw)
  699. {
  700. hw->last_update_ts = ktime_get();
  701. hw->is_active = true;
  702. }
  703. static inline void bwmon_monitor_stop(struct bw_hwmon *hw)
  704. {
  705. hw->is_active = false;
  706. cancel_work_sync(&hw->work);
  707. }
  708. static int update_bw_hwmon(struct bw_hwmon *hw)
  709. {
  710. struct hwmon_node *node = hw->node;
  711. int ret = 0;
  712. mutex_lock(&node->mon_lock);
  713. if (!node->mon_started) {
  714. mutex_unlock(&node->mon_lock);
  715. return -EBUSY;
  716. }
  717. dev_dbg(hw->dev, "Got update request\n");
  718. bwmon_monitor_stop(hw);
  719. /* governor update and commit */
  720. mutex_lock(&node->update_lock);
  721. if (bwmon_update_cur_freq(node))
  722. ret = qcom_dcvs_update_votes(dev_name(hw->dev),
  723. node->cur_freqs,
  724. 1 + (hw->second_vote_supported << 1),
  725. hw->dcvs_path);
  726. if (ret < 0)
  727. dev_err(hw->dev, "bwmon irq update failed: %d\n", ret);
  728. mutex_unlock(&node->update_lock);
  729. bwmon_monitor_start(hw);
  730. mutex_unlock(&node->mon_lock);
  731. return 0;
  732. }
  733. static int start_monitor(struct bw_hwmon *hwmon)
  734. {
  735. struct hwmon_node *node = hwmon->node;
  736. unsigned long mbps;
  737. int ret;
  738. node->prev_ts = ktime_get();
  739. node->prev_ab = 0;
  740. mbps = KHZ_TO_MBPS(node->cur_freqs[0].ib, hwmon->dcvs_width) *
  741. node->io_percent / 100;
  742. hwmon->up_wake_mbps = mbps;
  743. hwmon->down_wake_mbps = MIN_MBPS;
  744. ret = hwmon->start_hwmon(hwmon, mbps);
  745. if (ret < 0) {
  746. dev_err(hwmon->dev, "Unable to start HW monitor! (%d)\n", ret);
  747. return ret;
  748. }
  749. node->mon_started = true;
  750. return 0;
  751. }
  752. static void stop_monitor(struct bw_hwmon *hwmon)
  753. {
  754. struct hwmon_node *node = hwmon->node;
  755. mutex_lock(&node->mon_lock);
  756. node->mon_started = false;
  757. mutex_unlock(&node->mon_lock);
  758. hwmon->stop_hwmon(hwmon);
  759. }
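/*
 * Allocate the governor node for this monitor, populate the default
 * tunables exposed via sysfs, and add it to the global hwmon list.
 */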
  760. static int configure_hwmon_node(struct bw_hwmon *hwmon)
  761. {
  762. struct hwmon_node *node;
  763. unsigned long flags;
  764. node = devm_kzalloc(hwmon->dev, sizeof(*node), GFP_KERNEL);
  765. if (!node)
  766. return -ENOMEM;
  767. hwmon->node = node;
  768. node->guard_band_mbps = 100;
  769. node->decay_rate = 90;
  770. node->io_percent = 16;
  771. node->bw_step = 190;
  772. node->sample_ms = 50;
  773. node->window_ms = 50;
  774. node->up_scale = 0;
  775. node->up_thres = 10;
  776. node->down_thres = 0;
  777. node->down_count = 3;
  778. node->hist_memory = 0;
  779. node->hyst_trigger_count = 3;
  780. node->hyst_length = 0;
  781. node->idle_length = 0;
  782. node->idle_mbps = 400;
  783. node->ab_scale = 100;
  784. node->mbps_zones[0] = 0;
  785. node->hw = hwmon;
  786. mutex_init(&node->mon_lock);
  787. mutex_init(&node->update_lock);
  788. spin_lock_irqsave(&list_lock, flags);
  789. list_add_tail(&node->list, &hwmon_list);
  790. spin_unlock_irqrestore(&list_lock, flags);
  791. return 0;
  792. }
  793. #define SECOND_MAP_TBL "qcom,secondary-map"
  794. #define NUM_COLS 2
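/*
 * Parse the "qcom,secondary-map" DT property, a list of <src_freq dst_freq>
 * pairs, into a zero-terminated bwmon_second_map table.
 */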
  795. static struct bwmon_second_map *init_second_map(struct device *dev,
  796. struct device_node *of_node)
  797. {
  798. int len, nf, i, j;
  799. u32 data;
  800. struct bwmon_second_map *tbl;
  801. int ret;
  802. if (!of_find_property(of_node, SECOND_MAP_TBL, &len))
  803. return NULL;
  804. len /= sizeof(data);
  805. if (len % NUM_COLS || len == 0)
  806. return NULL;
  807. nf = len / NUM_COLS;
  808. tbl = devm_kzalloc(dev, (nf + 1) * sizeof(struct bwmon_second_map),
  809. GFP_KERNEL);
  810. if (!tbl)
  811. return NULL;
  812. for (i = 0, j = 0; i < nf; i++, j += 2) {
  813. ret = of_property_read_u32_index(of_node, SECOND_MAP_TBL,
  814. j, &data);
  815. if (ret < 0)
  816. return NULL;
  817. tbl[i].src_freq = data;
  818. ret = of_property_read_u32_index(of_node, SECOND_MAP_TBL,
  819. j + 1, &data);
  820. if (ret < 0)
  821. return NULL;
  822. tbl[i].dst_freq = data;
  823. pr_debug("Entry%d src:%u, dst:%u\n", i, tbl[i].src_freq,
  824. tbl[i].dst_freq);
  825. }
  826. tbl[i].src_freq = 0;
  827. return tbl;
  828. }
  829. #define ENABLE_MASK BIT(0)
  830. static __always_inline void mon_enable(struct bwmon *m, enum mon_reg_type type)
  831. {
  832. switch (type) {
  833. case MON1:
  834. writel_relaxed(ENABLE_MASK | m->throttle_adj, MON_EN(m));
  835. break;
  836. case MON2:
  837. writel_relaxed(ENABLE_MASK | m->throttle_adj, MON2_EN(m));
  838. break;
  839. case MON3:
  840. writel_relaxed(ENABLE_MASK | m->throttle_adj, MON3_EN(m));
  841. break;
  842. }
  843. }
  844. static __always_inline void mon_disable(struct bwmon *m, enum mon_reg_type type)
  845. {
  846. switch (type) {
  847. case MON1:
  848. writel_relaxed(m->throttle_adj, MON_EN(m));
  849. break;
  850. case MON2:
  851. writel_relaxed(m->throttle_adj, MON2_EN(m));
  852. break;
  853. case MON3:
  854. writel_relaxed(m->throttle_adj, MON3_EN(m));
  855. break;
  856. }
  857. /*
  858. * Order mon_disable() before mon_irq_clear(): if the IRQ were cleared
  859. * first and the counter then triggered it again, the IRQ line would be
  860. * left high with nobody handling it.
  861. */
  862. mb();
  863. }
  864. #define MON_CLEAR_BIT 0x1
  865. #define MON_CLEAR_ALL_BIT 0x2
  866. static __always_inline
  867. void mon_clear(struct bwmon *m, bool clear_all, enum mon_reg_type type)
  868. {
  869. switch (type) {
  870. case MON1:
  871. writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
  872. break;
  873. case MON2:
  874. if (clear_all)
  875. writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
  876. else
  877. writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
  878. break;
  879. case MON3:
  880. if (clear_all)
  881. writel_relaxed(MON_CLEAR_ALL_BIT, MON3_CLEAR(m));
  882. else
  883. writel_relaxed(MON_CLEAR_BIT, MON3_CLEAR(m));
  884. /*
  885. * On some hardware versions the MON3_CLEAR(m) register is not
  886. * self-clearing, so it must be cleared explicitly. We also need
  887. * to ensure the preceding write has completed before clearing
  888. * the register.
  889. */
  890. wmb();
  891. writel_relaxed(0, MON3_CLEAR(m));
  892. break;
  893. }
  894. /*
  895. * The counter clear and IRQ clear bits are not in the same 4KB
  896. * region. So, we need to make sure the counter clear is completed
  897. * before we try to clear the IRQ or do any other counter operations.
  898. */
  899. mb();
  900. }
  901. #define SAMPLE_WIN_LIM 0xFFFFFF
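/*
 * Program the hardware sample window (in HW timer ticks) when sample_ms
 * changes, clamping to the 24-bit register limit. MON1 has no hardware
 * sampling window.
 */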
  902. static __always_inline
  903. void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms,
  904. enum mon_reg_type type)
  905. {
  906. u32 rate;
  907. if (unlikely(sample_ms != m->sample_size_ms)) {
  908. rate = mult_frac(sample_ms, m->hw_timer_hz, MSEC_PER_SEC);
  909. m->sample_size_ms = sample_ms;
  910. if (unlikely(rate > SAMPLE_WIN_LIM)) {
  911. pr_warn("Sample window %u larger than hw limit: %u\n",
  912. rate, SAMPLE_WIN_LIM);
  913. rate = SAMPLE_WIN_LIM;
  914. }
  915. switch (type) {
  916. case MON1:
  917. WARN(1, "Invalid\n");
  918. return;
  919. case MON2:
  920. writel_relaxed(rate, MON2_SW(m));
  921. break;
  922. case MON3:
  923. writel_relaxed(rate, MON3_SW(m));
  924. break;
  925. }
  926. }
  927. }
  928. static void mon_glb_irq_enable(struct bwmon *m)
  929. {
  930. u32 val;
  931. val = readl_relaxed(GLB_INT_EN(m));
  932. val |= 1 << m->mport;
  933. writel_relaxed(val, GLB_INT_EN(m));
  934. }
  935. static __always_inline
  936. void mon_irq_enable(struct bwmon *m, enum mon_reg_type type)
  937. {
  938. u32 val;
  939. spin_lock(&mon_irq_lock);
  940. switch (type) {
  941. case MON1:
  942. mon_glb_irq_enable(m);
  943. val = readl_relaxed(MON_INT_EN(m));
  944. val |= MON_INT_ENABLE;
  945. writel_relaxed(val, MON_INT_EN(m));
  946. break;
  947. case MON2:
  948. mon_glb_irq_enable(m);
  949. val = readl_relaxed(MON_INT_EN(m));
  950. val |= MON2_INT_STATUS_MASK;
  951. writel_relaxed(val, MON_INT_EN(m));
  952. break;
  953. case MON3:
  954. val = readl_relaxed(MON3_INT_EN(m));
  955. val |= MON3_INT_STATUS_MASK;
  956. writel_relaxed(val, MON3_INT_EN(m));
  957. break;
  958. }
  959. spin_unlock(&mon_irq_lock);
  960. /*
  961. * make sure irq enable complete for local and global
  962. * to avoid race with other monitor calls
  963. */
  964. mb();
  965. }
  966. static void mon_glb_irq_disable(struct bwmon *m)
  967. {
  968. u32 val;
  969. val = readl_relaxed(GLB_INT_EN(m));
  970. val &= ~(1 << m->mport);
  971. writel_relaxed(val, GLB_INT_EN(m));
  972. }
  973. static __always_inline
  974. void mon_irq_disable(struct bwmon *m, enum mon_reg_type type)
  975. {
  976. u32 val;
  977. spin_lock(&mon_irq_lock);
  978. switch (type) {
  979. case MON1:
  980. mon_glb_irq_disable(m);
  981. val = readl_relaxed(MON_INT_EN(m));
  982. val &= ~MON_INT_ENABLE;
  983. writel_relaxed(val, MON_INT_EN(m));
  984. break;
  985. case MON2:
  986. mon_glb_irq_disable(m);
  987. val = readl_relaxed(MON_INT_EN(m));
  988. val &= ~MON2_INT_STATUS_MASK;
  989. writel_relaxed(val, MON_INT_EN(m));
  990. break;
  991. case MON3:
  992. val = readl_relaxed(MON3_INT_EN(m));
  993. val &= ~MON3_INT_STATUS_MASK;
  994. writel_relaxed(val, MON3_INT_EN(m));
  995. break;
  996. }
  997. spin_unlock(&mon_irq_lock);
  998. /*
  999. * make sure irq disable complete for local and global
  1000. * to avoid race with other monitor calls
  1001. */
  1002. mb();
  1003. }
  1004. static __always_inline
  1005. unsigned int mon_irq_status(struct bwmon *m, enum mon_reg_type type)
  1006. {
  1007. u32 mval;
  1008. switch (type) {
  1009. case MON1:
  1010. mval = readl_relaxed(MON_INT_STATUS(m));
  1011. dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
  1012. readl_relaxed(GLB_INT_STATUS(m)));
  1013. mval &= MON_INT_STATUS_MASK;
  1014. break;
  1015. case MON2:
  1016. mval = readl_relaxed(MON_INT_STATUS(m));
  1017. dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
  1018. readl_relaxed(GLB_INT_STATUS(m)));
  1019. mval &= MON2_INT_STATUS_MASK;
  1020. mval >>= MON2_INT_STATUS_SHIFT;
  1021. break;
  1022. case MON3:
  1023. mval = readl_relaxed(MON3_INT_STATUS(m));
  1024. dev_dbg(m->dev, "IRQ status p:%x\n", mval);
  1025. mval &= MON3_INT_STATUS_MASK;
  1026. break;
  1027. }
  1028. return mval;
  1029. }
  1030. static void mon_glb_irq_clear(struct bwmon *m)
  1031. {
  1032. /*
  1033. * Synchronize the local interrupt clear in mon_irq_clear()
  1034. * with the global interrupt clear here. Otherwise, the CPU
  1035. * may reorder the two writes and clear the global interrupt
  1036. * before the local interrupt, causing the global interrupt
  1037. * to be retriggered by the local interrupt still being high.
  1038. */
  1039. mb();
  1040. writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
  1041. /*
  1042. * Similarly, because the global registers are in a different
  1043. * region than the local registers, we need to ensure any register
  1044. * writes to enable the monitor after this call are ordered with the
  1045. * clearing here so that local writes don't happen before the
  1046. * interrupt is cleared.
  1047. */
  1048. mb();
  1049. }
  1050. static __always_inline
  1051. void mon_irq_clear(struct bwmon *m, enum mon_reg_type type)
  1052. {
  1053. switch (type) {
  1054. case MON1:
  1055. writel_relaxed(MON_INT_STATUS_MASK, MON_INT_CLR(m));
  1056. mon_glb_irq_clear(m);
  1057. break;
  1058. case MON2:
  1059. writel_relaxed(MON2_INT_STATUS_MASK, MON_INT_CLR(m));
  1060. mon_glb_irq_clear(m);
  1061. break;
  1062. case MON3:
  1063. writel_relaxed(MON3_INT_STATUS_MASK, MON3_INT_CLR(m));
  1064. /*
  1065. * On some hardware versions the MON3_INT_CLR(m) register is not
  1066. * self-clearing, so it must be cleared explicitly. We also need
  1067. * to ensure the preceding write has completed before clearing
  1068. * the register.
  1069. */
  1070. wmb();
  1071. writel_relaxed(0, MON3_INT_CLR(m));
  1072. break;
  1073. }
  1074. }
  1075. #define THROTTLE_MASK 0x1F
  1076. #define THROTTLE_SHIFT 16
  1077. static int mon_set_throttle_adj(struct bw_hwmon *hw, uint adj)
  1078. {
  1079. struct bwmon *m = to_bwmon(hw);
  1080. if (adj > THROTTLE_MASK)
  1081. return -EINVAL;
  1082. adj = (adj & THROTTLE_MASK) << THROTTLE_SHIFT;
  1083. m->throttle_adj = adj;
  1084. return 0;
  1085. }
  1086. static u32 mon_get_throttle_adj(struct bw_hwmon *hw)
  1087. {
  1088. struct bwmon *m = to_bwmon(hw);
  1089. return m->throttle_adj >> THROTTLE_SHIFT;
  1090. }
  1091. #define ZONE1_SHIFT 8
  1092. #define ZONE2_SHIFT 16
  1093. #define ZONE3_SHIFT 24
  1094. #define ZONE0_ACTION 0x01 /* Increment zone 0 count */
  1095. #define ZONE1_ACTION 0x09 /* Increment zone 1 & clear lower zones */
  1096. #define ZONE2_ACTION 0x25 /* Increment zone 2 & clear lower zones */
  1097. #define ZONE3_ACTION 0x95 /* Increment zone 3 & clear lower zones */
  1098. static u32 calc_zone_actions(void)
  1099. {
  1100. u32 zone_actions;
  1101. zone_actions = ZONE0_ACTION;
  1102. zone_actions |= ZONE1_ACTION << ZONE1_SHIFT;
  1103. zone_actions |= ZONE2_ACTION << ZONE2_SHIFT;
  1104. zone_actions |= ZONE3_ACTION << ZONE3_SHIFT;
  1105. return zone_actions;
  1106. }
  1107. #define ZONE_CNT_LIM 0xFFU
  1108. #define UP_CNT_1 1
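/*
 * Build the zone count thresholds: zone 1 (low traffic) must repeat
 * down_cnt times before raising an IRQ, zone 3 (high traffic) fires after
 * a single sample, and zones 0 and 2 are set to the limit so they
 * effectively never fire.
 */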
  1109. static u32 calc_zone_counts(struct bw_hwmon *hw)
  1110. {
  1111. u32 zone_counts;
  1112. zone_counts = ZONE_CNT_LIM;
  1113. zone_counts |= min(hw->down_cnt, ZONE_CNT_LIM) << ZONE1_SHIFT;
  1114. zone_counts |= ZONE_CNT_LIM << ZONE2_SHIFT;
  1115. zone_counts |= UP_CNT_1 << ZONE3_SHIFT;
  1116. return zone_counts;
  1117. }
  1118. #define MB_SHIFT 20
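/*
 * Convert an MBps threshold over an ms-long window into hardware count
 * units of 2^shift bytes each.
 */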
  1119. static u32 mbps_to_count(unsigned long mbps, unsigned int ms, u8 shift)
  1120. {
  1121. mbps *= ms;
  1122. if (shift > MB_SHIFT)
  1123. mbps >>= shift - MB_SHIFT;
  1124. else
  1125. mbps <<= MB_SHIFT - shift;
  1126. return DIV_ROUND_UP(mbps, MSEC_PER_SEC);
  1127. }
  1128. /*
  1129. * Define the 4 zones using HI, MED & LO thresholds:
  1130. * Zone 0: byte count < THRES_LO
  1131. * Zone 1: THRES_LO < byte count < THRES_MED
  1132. * Zone 2: THRES_MED < byte count < THRES_HI
  1133. * Zone 3: THRES_LIM > byte count > THRES_HI
  1134. */
  1135. #define THRES_LIM(shift) (0xFFFFFFFF >> shift)
  1136. static __always_inline
  1137. void set_zone_thres(struct bwmon *m, unsigned int sample_ms,
  1138. enum mon_reg_type type)
  1139. {
  1140. struct bw_hwmon *hw = &m->hw;
  1141. u32 hi, med, lo;
  1142. u32 zone_cnt_thres = calc_zone_counts(hw);
  1143. hi = mbps_to_count(hw->up_wake_mbps, sample_ms, m->count_shift);
  1144. med = mbps_to_count(hw->down_wake_mbps, sample_ms, m->count_shift);
  1145. lo = 0;
  1146. if (unlikely((hi > m->thres_lim) || (med > hi) || (lo > med))) {
  1147. pr_warn("Zone thres larger than hw limit: hi:%u med:%u lo:%u\n",
  1148. hi, med, lo);
  1149. hi = min(hi, m->thres_lim);
  1150. med = min(med, hi - 1);
  1151. lo = min(lo, med-1);
  1152. }
  1153. switch (type) {
  1154. case MON1:
  1155. WARN(1, "Invalid\n");
  1156. return;
  1157. case MON2:
  1158. writel_relaxed(hi, MON2_THRES_HI(m));
  1159. writel_relaxed(med, MON2_THRES_MED(m));
  1160. writel_relaxed(lo, MON2_THRES_LO(m));
  1161. /* Set the zone count thresholds for interrupts */
  1162. writel_relaxed(zone_cnt_thres, MON2_ZONE_CNT_THRES(m));
  1163. break;
  1164. case MON3:
  1165. writel_relaxed(hi, MON3_THRES_HI(m));
  1166. writel_relaxed(med, MON3_THRES_MED(m));
  1167. writel_relaxed(lo, MON3_THRES_LO(m));
  1168. /* Set the zone count thresholds for interrupts */
  1169. writel_relaxed(zone_cnt_thres, MON3_ZONE_CNT_THRES(m));
  1170. break;
  1171. }
  1172. dev_dbg(m->dev, "Thres: hi:%u med:%u lo:%u\n", hi, med, lo);
  1173. dev_dbg(m->dev, "Zone Count Thres: %0x\n", zone_cnt_thres);
  1174. }
  1175. static __always_inline
  1176. void mon_set_zones(struct bwmon *m, unsigned int sample_ms,
  1177. enum mon_reg_type type)
  1178. {
  1179. mon_set_hw_sampling_window(m, sample_ms, type);
  1180. set_zone_thres(m, sample_ms, type);
  1181. }
  1182. static void mon_set_limit(struct bwmon *m, u32 count)
  1183. {
  1184. writel_relaxed(count, MON_THRES(m));
  1185. dev_dbg(m->dev, "Thres: %08x\n", count);
  1186. }
  1187. static u32 mon_get_limit(struct bwmon *m)
  1188. {
  1189. return readl_relaxed(MON_THRES(m));
  1190. }
  1191. #define THRES_HIT(status) (status & BIT(0))
  1192. #define OVERFLOW(status) (status & BIT(1))
  1193. static unsigned long mon_get_count1(struct bwmon *m)
  1194. {
  1195. unsigned long count, status;
  1196. count = readl_relaxed(MON_CNT(m));
  1197. status = mon_irq_status(m, MON1);
  1198. dev_dbg(m->dev, "Counter: %08lx\n", count);
  1199. if (OVERFLOW(status) && m->spec->overflow)
  1200. count += 0xFFFFFFFF;
  1201. if (THRES_HIT(status) && m->spec->wrap_on_thres)
  1202. count += mon_get_limit(m);
  1203. dev_dbg(m->dev, "Actual Count: %08lx\n", count);
  1204. return count;
  1205. }
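/*
 * Identify which zone triggered: use the latched IRQ status if present,
 * otherwise fall back to the highest zone with a non-zero count in the
 * zone count register.
 */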
  1206. static __always_inline
  1207. unsigned int get_zone(struct bwmon *m, enum mon_reg_type type)
  1208. {
  1209. u32 zone_counts;
  1210. u32 zone;
  1211. zone = get_bitmask_order(m->intr_status);
  1212. if (zone) {
  1213. zone--;
  1214. } else {
  1215. switch (type) {
  1216. case MON1:
  1217. WARN(1, "Invalid\n");
  1218. return 0;
  1219. case MON2:
  1220. zone_counts = readl_relaxed(MON2_ZONE_CNT(m));
  1221. break;
  1222. case MON3:
  1223. zone_counts = readl_relaxed(MON3_ZONE_CNT(m));
  1224. break;
  1225. }
  1226. if (zone_counts) {
  1227. zone = get_bitmask_order(zone_counts) - 1;
  1228. zone /= 8;
  1229. }
  1230. }
  1231. m->intr_status = 0;
  1232. return zone;
  1233. }
  1234. static __always_inline
  1235. unsigned long get_zone_count(struct bwmon *m, unsigned int zone,
  1236. enum mon_reg_type type)
  1237. {
  1238. unsigned long count;
  1239. switch (type) {
  1240. case MON1:
  1241. WARN(1, "Invalid\n");
  1242. return 0;
  1243. case MON2:
  1244. count = readl_relaxed(MON2_ZONE_MAX(m, zone));
  1245. break;
  1246. case MON3:
  1247. count = readl_relaxed(MON3_ZONE_MAX(m, zone));
  1248. break;
  1249. }
  1250. if (count)
  1251. count++;
  1252. return count;
  1253. }
  1254. static __always_inline
  1255. unsigned long mon_get_zone_stats(struct bwmon *m, enum mon_reg_type type)
  1256. {
  1257. unsigned int zone;
  1258. unsigned long count = 0;
  1259. zone = get_zone(m, type);
  1260. count = get_zone_count(m, zone, type);
  1261. count <<= m->count_shift;
  1262. dev_dbg(m->dev, "Zone%d Max byte count: %08lx\n", zone, count);
  1263. return count;
  1264. }
  1265. static __always_inline
  1266. unsigned long mon_get_count(struct bwmon *m, enum mon_reg_type type)
  1267. {
  1268. unsigned long count;
  1269. switch (type) {
  1270. case MON1:
  1271. count = mon_get_count1(m);
  1272. break;
  1273. case MON2:
  1274. case MON3:
  1275. count = mon_get_zone_stats(m, type);
  1276. break;
  1277. }
  1278. return count;
  1279. }
  1280. /* ********** CPUBW specific code ********** */
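/*
 * Briefly disable the monitor to read and reset its byte count, clear any
 * pending IRQ, then re-enable it. Returns the bytes seen since the last call.
 */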
  1281. static __always_inline
  1282. unsigned long __get_bytes_and_clear(struct bw_hwmon *hw, enum mon_reg_type type)
  1283. {
  1284. struct bwmon *m = to_bwmon(hw);
  1285. unsigned long count;
  1286. mon_disable(m, type);
  1287. count = mon_get_count(m, type);
  1288. mon_clear(m, false, type);
  1289. mon_irq_clear(m, type);
  1290. mon_enable(m, type);
  1291. return count;
  1292. }
  1293. static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
  1294. {
  1295. return __get_bytes_and_clear(hw, MON1);
  1296. }
  1297. static unsigned long get_bytes_and_clear2(struct bw_hwmon *hw)
  1298. {
  1299. return __get_bytes_and_clear(hw, MON2);
  1300. }
  1301. static unsigned long get_bytes_and_clear3(struct bw_hwmon *hw)
  1302. {
  1303. return __get_bytes_and_clear(hw, MON3);
  1304. }
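/*
 * MON1 only: read and clear the current byte count, program a new byte
 * threshold (at least 500000 bytes on hardware that wraps on threshold),
 * and re-enable the monitor. Returns the bytes counted so far.
 */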
  1305. static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
  1306. {
  1307. unsigned long count;
  1308. u32 limit;
  1309. struct bwmon *m = to_bwmon(hw);
  1310. mon_disable(m, MON1);
  1311. count = mon_get_count1(m);
  1312. mon_clear(m, false, MON1);
  1313. mon_irq_clear(m, MON1);
  1314. if (likely(!m->spec->wrap_on_thres))
  1315. limit = bytes;
  1316. else
  1317. limit = max(bytes, 500000UL);
  1318. mon_set_limit(m, limit);
  1319. mon_enable(m, MON1);
  1320. return count;
  1321. }
  1322. static unsigned long
  1323. __set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms,
  1324. enum mon_reg_type type)
  1325. {
  1326. struct bwmon *m = to_bwmon(hw);
  1327. mon_disable(m, type);
  1328. mon_clear(m, false, type);
  1329. mon_irq_clear(m, type);
  1330. mon_set_zones(m, sample_ms, type);
  1331. mon_enable(m, type);
  1332. return 0;
  1333. }
  1334. static unsigned long set_hw_events(struct bw_hwmon *hw, unsigned int sample_ms)
  1335. {
  1336. return __set_hw_events(hw, sample_ms, MON2);
  1337. }
  1338. static unsigned long
  1339. set_hw_events3(struct bw_hwmon *hw, unsigned int sample_ms)
  1340. {
  1341. return __set_hw_events(hw, sample_ms, MON3);
  1342. }
  1343. static irqreturn_t
  1344. __bwmon_intr_handler(int irq, void *dev, enum mon_reg_type type)
  1345. {
  1346. struct bwmon *m = dev;
  1347. m->intr_status = mon_irq_status(m, type);
  1348. if (!m->intr_status)
  1349. return IRQ_NONE;
  1350. if (bw_hwmon_sample_end(&m->hw) > 0)
  1351. return IRQ_WAKE_THREAD;
  1352. return IRQ_HANDLED;
  1353. }
  1354. static irqreturn_t bwmon_intr_handler(int irq, void *dev)
  1355. {
  1356. return __bwmon_intr_handler(irq, dev, MON1);
  1357. }
  1358. static irqreturn_t bwmon_intr_handler2(int irq, void *dev)
  1359. {
  1360. return __bwmon_intr_handler(irq, dev, MON2);
  1361. }
  1362. static irqreturn_t bwmon_intr_handler3(int irq, void *dev)
  1363. {
  1364. return __bwmon_intr_handler(irq, dev, MON3);
  1365. }
  1366. static irqreturn_t bwmon_intr_thread(int irq, void *dev)
  1367. {
  1368. struct bwmon *m = dev;
  1369. update_bw_hwmon(&m->hw);
  1370. return IRQ_HANDLED;
  1371. }
  1372. static __always_inline
  1373. void mon_set_byte_count_filter(struct bwmon *m, enum mon_reg_type type)
  1374. {
  1375. if (!m->byte_mask)
  1376. return;
  1377. switch (type) {
  1378. case MON1:
  1379. case MON2:
  1380. writel_relaxed(m->byte_mask, MON_MASK(m));
  1381. writel_relaxed(m->byte_match, MON_MATCH(m));
  1382. break;
  1383. case MON3:
  1384. writel_relaxed(m->byte_mask, MON3_MASK(m));
  1385. writel_relaxed(m->byte_match, MON3_MATCH(m));
  1386. break;
  1387. }
  1388. }
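/*
 * Bring a monitor up: request the threaded IRQ, program either the MON1
 * byte-count limit or the MON2/MON3 zone thresholds and actions, set the
 * byte-count filter, then enable the IRQ and counters and start the
 * periodic monitor work.
 */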
  1389. static __always_inline int __start_bw_hwmon(struct bw_hwmon *hw,
  1390. unsigned long mbps, enum mon_reg_type type)
  1391. {
  1392. struct bwmon *m = to_bwmon(hw);
  1393. u32 limit, zone_actions;
  1394. int ret;
  1395. irq_handler_t handler;
  1396. switch (type) {
  1397. case MON1:
  1398. handler = bwmon_intr_handler;
  1399. limit = mbps_to_bytes(mbps, hw->node->window_ms);
  1400. break;
  1401. case MON2:
  1402. zone_actions = calc_zone_actions();
  1403. handler = bwmon_intr_handler2;
  1404. break;
  1405. case MON3:
  1406. zone_actions = calc_zone_actions();
  1407. handler = bwmon_intr_handler3;
  1408. break;
  1409. }
  1410. ret = request_threaded_irq(m->irq, handler, bwmon_intr_thread,
  1411. IRQF_ONESHOT | IRQF_SHARED,
  1412. dev_name(m->dev), m);
  1413. if (ret < 0) {
  1414. dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
  1415. ret);
  1416. return ret;
  1417. }
  1418. INIT_WORK(&hw->work, &bwmon_monitor_work);
  1419. mon_disable(m, type);
  1420. mon_clear(m, false, type);
  1421. switch (type) {
  1422. case MON1:
  1423. mon_set_limit(m, limit);
  1424. break;
  1425. case MON2:
  1426. mon_set_zones(m, hw->node->window_ms, type);
  1427. /* Set the zone actions to increment appropriate counters */
  1428. writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
  1429. break;
  1430. case MON3:
  1431. mon_set_zones(m, hw->node->window_ms, type);
  1432. /* Set the zone actions to increment appropriate counters */
  1433. writel_relaxed(zone_actions, MON3_ZONE_ACTIONS(m));
  1434. }
  1435. mon_set_byte_count_filter(m, type);
  1436. mon_irq_clear(m, type);
  1437. mon_irq_enable(m, type);
  1438. mon_enable(m, type);
  1439. bwmon_monitor_start(hw);
  1440. return 0;
  1441. }
  1442. static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
  1443. {
  1444. return __start_bw_hwmon(hw, mbps, MON1);
  1445. }
  1446. static int start_bw_hwmon2(struct bw_hwmon *hw, unsigned long mbps)
  1447. {
  1448. return __start_bw_hwmon(hw, mbps, MON2);
  1449. }
  1450. static int start_bw_hwmon3(struct bw_hwmon *hw, unsigned long mbps)
  1451. {
  1452. return __start_bw_hwmon(hw, mbps, MON3);
  1453. }
static __always_inline
void __stop_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
{
	struct bwmon *m = to_bwmon(hw);

	bwmon_monitor_stop(hw);
	mon_irq_disable(m, type);
	free_irq(m->irq, m);
	mon_disable(m, type);
	mon_clear(m, true, type);
	mon_irq_clear(m, type);
}

static void stop_bw_hwmon(struct bw_hwmon *hw)
{
	return __stop_bw_hwmon(hw, MON1);
}

static void stop_bw_hwmon2(struct bw_hwmon *hw)
{
	return __stop_bw_hwmon(hw, MON2);
}

static void stop_bw_hwmon3(struct bw_hwmon *hw)
{
	return __stop_bw_hwmon(hw, MON3);
}

/*************************************************************************/
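
/*
 * Per-compatible hardware descriptions. reg_type selects which register
 * layout (MON1/MON2/MON3) and therefore which start/stop/read ops get
 * hooked up in probe().
 */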
static const struct bwmon_spec spec[] = {
	[0] = {
		.wrap_on_thres = true,
		.overflow = false,
		.throt_adj = false,
		.hw_sampling = false,
		.has_global_base = true,
		.reg_type = MON1,
	},
	[1] = {
		.wrap_on_thres = false,
		.overflow = true,
		.throt_adj = false,
		.hw_sampling = false,
		.has_global_base = true,
		.reg_type = MON1,
	},
	[2] = {
		.wrap_on_thres = false,
		.overflow = true,
		.throt_adj = true,
		.hw_sampling = false,
		.has_global_base = true,
		.reg_type = MON1,
	},
	[3] = {
		.wrap_on_thres = false,
		.overflow = true,
		.throt_adj = true,
		.hw_sampling = true,
		.has_global_base = true,
		.reg_type = MON2,
	},
	[4] = {
		.wrap_on_thres = false,
		.overflow = true,
		.throt_adj = false,
		.hw_sampling = true,
		.reg_type = MON3,
	},
};

static const struct of_device_id qcom_bwmon_match_table[] = {
	{ .compatible = "qcom,bwmon", .data = &spec[0] },
	{ .compatible = "qcom,bwmon2", .data = &spec[1] },
	{ .compatible = "qcom,bwmon3", .data = &spec[2] },
	{ .compatible = "qcom,bwmon4", .data = &spec[3] },
	{ .compatible = "qcom,bwmon5", .data = &spec[4] },
	{}
};
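
/*
 * Probe: map the monitor registers, parse the DT configuration (counter
 * unit, byte filter, DCVS target and optional second vote), register the
 * DCVS voter(s), then start the monitor and expose its sysfs node under
 * the qcom_dcvs kobject.
 */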
static int qcom_bwmon_driver_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct bwmon *m;
	struct hwmon_node *node;
	int ret;
	u32 data, count_unit;
	u32 dcvs_hw = NUM_DCVS_PATHS, second_hw = NUM_DCVS_PATHS;
	struct kobject *dcvs_kobj;
	struct device_node *of_node, *tmp_of_node;
	unsigned long flags;

	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;
	m->dev = dev;
	m->hw.dev = dev;

	m->spec = of_device_get_match_data(dev);
	if (!m->spec) {
		dev_err(dev, "Unknown device type!\n");
		return -ENODEV;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res) {
		dev_err(dev, "base not found!\n");
		return -EINVAL;
	}
	m->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!m->base) {
		dev_err(dev, "Unable to map base!\n");
		return -ENOMEM;
	}

	if (m->spec->has_global_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "global_base");
		if (!res) {
			dev_err(dev, "global_base not found!\n");
			return -EINVAL;
		}
		m->global_base = devm_ioremap(dev, res->start,
					      resource_size(res));
		if (!m->global_base) {
			dev_err(dev, "Unable to map global_base!\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
		if (ret < 0) {
			dev_err(dev, "mport not found! (%d)\n", ret);
			return ret;
		}
		m->mport = data;
	}

	m->irq = platform_get_irq(pdev, 0);
	if (m->irq < 0) {
		dev_err(dev, "Unable to get IRQ number\n");
		return m->irq;
	}

	if (m->spec->hw_sampling) {
		ret = of_property_read_u32(dev->of_node, "qcom,hw-timer-hz",
					   &m->hw_timer_hz);
		if (ret < 0) {
			dev_err(dev, "HW sampling rate not specified!\n");
			return ret;
		}
	}

	if (of_property_read_u32(dev->of_node, "qcom,count-unit", &count_unit))
		count_unit = SZ_1M;
	m->count_shift = order_base_2(count_unit);
	m->thres_lim = THRES_LIM(m->count_shift);
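
	/* Hook up the start/stop/read ops that match this monitor's register layout. */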
	switch (m->spec->reg_type) {
	case MON3:
		m->hw.start_hwmon = start_bw_hwmon3;
		m->hw.stop_hwmon = stop_bw_hwmon3;
		m->hw.get_bytes_and_clear = get_bytes_and_clear3;
		m->hw.set_hw_events = set_hw_events3;
		break;
	case MON2:
		m->hw.start_hwmon = start_bw_hwmon2;
		m->hw.stop_hwmon = stop_bw_hwmon2;
		m->hw.get_bytes_and_clear = get_bytes_and_clear2;
		m->hw.set_hw_events = set_hw_events;
		break;
	case MON1:
		m->hw.start_hwmon = start_bw_hwmon;
		m->hw.stop_hwmon = stop_bw_hwmon;
		m->hw.get_bytes_and_clear = get_bytes_and_clear;
		m->hw.set_thres = set_thres;
		break;
	}
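
	/* Optional MID-based byte filter; both values stay 0 when the properties are absent. */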
	of_property_read_u32(dev->of_node, "qcom,byte-mid-match",
			     &m->byte_match);
	of_property_read_u32(dev->of_node, "qcom,byte-mid-mask",
			     &m->byte_mask);

	if (m->spec->throt_adj) {
		m->hw.set_throttle_adj = mon_set_throttle_adj;
		m->hw.get_throttle_adj = mon_get_throttle_adj;
	}

	of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
	if (!of_node) {
		dev_err(dev, "Unable to find target-dev for bwmon device\n");
		return -EINVAL;
	}
	ret = of_property_read_u32(of_node, "qcom,dcvs-hw-type", &dcvs_hw);
	if (ret < 0 || dcvs_hw >= NUM_DCVS_HW_TYPES) {
		dev_err(dev, "invalid dcvs_hw=%d, ret=%d\n", dcvs_hw, ret);
		return -EINVAL;
	}
	m->hw.dcvs_hw = dcvs_hw;
	ret = of_property_read_u32(of_node, "qcom,bus-width",
				   &m->hw.dcvs_width);
	if (ret < 0 || !m->hw.dcvs_width) {
		dev_err(dev, "invalid hw width=%d, ret=%d\n",
			m->hw.dcvs_width, ret);
		return -EINVAL;
	}
	m->hw.dcvs_path = DCVS_SLOW_PATH;
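
	/* Optional second DCVS vote, described by a separate target-dev phandle. */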
	of_node = of_parse_phandle(dev->of_node, "qcom,second-vote", 0);
	if (of_node) {
		tmp_of_node = of_parse_phandle(of_node, "qcom,target-dev", 0);
		if (!tmp_of_node) {
			dev_err(dev, "Unable to find target-dev for second vote\n");
			return -EINVAL;
		}
		ret = of_property_read_u32(tmp_of_node, "qcom,dcvs-hw-type",
					   &second_hw);
		if (ret < 0 || second_hw >= NUM_DCVS_HW_TYPES) {
			dev_err(dev, "invalid sec dcvs_hw=%d, ret=%d\n",
				second_hw, ret);
			return -EINVAL;
		}
		m->hw.second_dcvs_hw = second_hw;
		if (of_find_property(of_node, "qcom,secondary-map", &ret)) {
			m->hw.second_map = init_second_map(dev, of_node);
			if (!m->hw.second_map) {
				dev_err(dev, "error importing second map!\n");
				return -EINVAL;
			}
		}
		if (!m->hw.second_map) {
			ret = of_property_read_u32(tmp_of_node, "qcom,bus-width",
						   &m->hw.second_dcvs_width);
			if (ret < 0 || !m->hw.second_dcvs_width) {
				dev_err(dev, "invalid sec hw width=%d, ret=%d\n",
					m->hw.second_dcvs_width, ret);
				return -EINVAL;
			}
		}
		m->hw.second_vote_supported = true;
	}
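
	/* Register as a DCVS voter on the slow path (and for the second vote, if present). */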
	ret = qcom_dcvs_register_voter(dev_name(dev), dcvs_hw, m->hw.dcvs_path);
	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "qcom dcvs registration error: %d\n", ret);
		return ret;
	}

	if (m->hw.second_vote_supported) {
		ret = qcom_dcvs_register_voter(dev_name(dev), second_hw,
					       DCVS_SLOW_PATH);
		if (ret < 0) {
			dev_err(dev, "second hw qcom dcvs reg err: %d\n", ret);
			return ret;
		}
	}

	ret = configure_hwmon_node(&m->hw);
	if (ret < 0) {
		dev_err(dev, "bwmon node configuration failed: %d\n", ret);
		return ret;
	}

	node = m->hw.node;
	ret = qcom_dcvs_hw_minmax_get(dcvs_hw, &node->hw_min_freq,
				      &node->hw_max_freq);
	if (ret < 0) {
		dev_err(dev, "error getting minmax from qcom dcvs: %d\n", ret);
		return ret;
	}
	node->min_freq = node->hw_min_freq;
	node->max_freq = node->hw_max_freq;
	node->cur_freqs[0].ib = node->min_freq;
	node->cur_freqs[0].ab = 0;
	node->cur_freqs[0].hw_type = dcvs_hw;
	node->cur_freqs[1].hw_type = second_hw;

	/* second vote only enabled by default if secondary map is present */
	if (m->hw.second_map)
		m->hw.second_vote_limit = get_dst_from_map(&m->hw, U32_MAX);

	m->hw.is_active = false;
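
	/* The workqueue and jiffies-update hook are global: created once, by the first bwmon instance probed. */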
	mutex_lock(&bwmon_lock);
	if (!bwmon_wq) {
		bwmon_wq = create_freezable_workqueue("bwmon_wq");
		if (!bwmon_wq) {
			dev_err(dev, "Couldn't create bwmon workqueue.\n");
			mutex_unlock(&bwmon_lock);
			return -ENOMEM;
		}
		register_trace_android_vh_jiffies_update(
				bwmon_jiffies_update_cb, NULL);
	}
	mutex_unlock(&bwmon_lock);

	ret = start_monitor(&m->hw);
	if (ret < 0) {
		dev_err(dev, "Error starting BWMON monitor: %d\n", ret);
		goto err_sysfs;
	}

	dcvs_kobj = qcom_dcvs_kobject_get(dcvs_hw);
	if (IS_ERR(dcvs_kobj)) {
		ret = PTR_ERR(dcvs_kobj);
		dev_err(dev, "error getting kobj from qcom_dcvs: %d\n", ret);
		goto err_sysfs;
	}
	ret = kobject_init_and_add(&node->kobj, &bwmon_ktype, dcvs_kobj,
				   dev_name(dev));
	if (ret < 0) {
		dev_err(dev, "failed to init bwmon kobj: %d\n", ret);
		kobject_put(&node->kobj);
		goto err_sysfs;
	}

	return 0;

err_sysfs:
	stop_monitor(&m->hw);
	spin_lock_irqsave(&list_lock, flags);
	list_del(&node->list);
	spin_unlock_irqrestore(&list_lock, flags);
	return ret;
}

static struct platform_driver qcom_bwmon_driver = {
	.probe = qcom_bwmon_driver_probe,
	.driver = {
		.name = "qcom-bwmon",
		.of_match_table = qcom_bwmon_match_table,
		.suppress_bind_attrs = true,
	},
};

module_platform_driver(qcom_bwmon_driver);

MODULE_DESCRIPTION("QCOM BWMON driver");
MODULE_LICENSE("GPL");