/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/include/linux/cpufreq.h
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <[email protected]>
 */
  8. #ifndef _LINUX_CPUFREQ_H
  9. #define _LINUX_CPUFREQ_H
  10. #include <linux/clk.h>
  11. #include <linux/cpu.h>
  12. #include <linux/cpumask.h>
  13. #include <linux/completion.h>
  14. #include <linux/kobject.h>
  15. #include <linux/notifier.h>
  16. #include <linux/of.h>
  17. #include <linux/of_device.h>
  18. #include <linux/pm_opp.h>
  19. #include <linux/pm_qos.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/sysfs.h>
/*********************************************************************
 *                        CPUFREQ INTERFACE                          *
 *********************************************************************/
/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */
  31. #define CPUFREQ_ETERNAL (-1)
  32. #define CPUFREQ_NAME_LEN 16
  33. /* Print length for names. Extra 1 space for accommodating '\n' in prints */
  34. #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
  35. struct cpufreq_governor;
  36. enum cpufreq_table_sorting {
  37. CPUFREQ_TABLE_UNSORTED,
  38. CPUFREQ_TABLE_SORTED_ASCENDING,
  39. CPUFREQ_TABLE_SORTED_DESCENDING
  40. };
  41. struct cpufreq_cpuinfo {
  42. unsigned int max_freq;
  43. unsigned int min_freq;
  44. /* in 10^(-9) s = nanoseconds */
  45. unsigned int transition_latency;
  46. };
  47. struct cpufreq_policy {
  48. /* CPUs sharing clock, require sw coordination */
  49. cpumask_var_t cpus; /* Online CPUs only */
  50. cpumask_var_t related_cpus; /* Online + Offline CPUs */
  51. cpumask_var_t real_cpus; /* Related and present */
  52. unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
  53. should set cpufreq */
  54. unsigned int cpu; /* cpu managing this policy, must be online */
  55. struct clk *clk;
  56. struct cpufreq_cpuinfo cpuinfo;/* see above */
  57. unsigned int min; /* in kHz */
  58. unsigned int max; /* in kHz */
  59. unsigned int cur; /* in kHz, only needed if cpufreq
  60. * governors are used */
  61. unsigned int suspend_freq; /* freq to set during suspend */
  62. unsigned int policy; /* see above */
  63. unsigned int last_policy; /* policy before unplug */
  64. struct cpufreq_governor *governor; /* see below */
  65. void *governor_data;
  66. char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */
  67. struct work_struct update; /* if update_policy() needs to be
  68. * called, but you're in IRQ context */
  69. struct freq_constraints constraints;
  70. struct freq_qos_request *min_freq_req;
  71. struct freq_qos_request *max_freq_req;
  72. struct cpufreq_frequency_table *freq_table;
  73. enum cpufreq_table_sorting freq_table_sorted;
  74. struct list_head policy_list;
  75. struct kobject kobj;
  76. struct completion kobj_unregister;
  77. /*
  78. * The rules for this semaphore:
  79. * - Any routine that wants to read from the policy structure will
  80. * do a down_read on this semaphore.
  81. * - Any routine that will write to the policy structure and/or may take away
  82. * the policy altogether (eg. CPU hotplug), will hold this lock in write
  83. * mode before doing so.
  84. */
  85. struct rw_semaphore rwsem;
  86. /*
  87. * Fast switch flags:
  88. * - fast_switch_possible should be set by the driver if it can
  89. * guarantee that frequency can be changed on any CPU sharing the
  90. * policy and that the change will affect all of the policy CPUs then.
  91. * - fast_switch_enabled is to be set by governors that support fast
  92. * frequency switching with the help of cpufreq_enable_fast_switch().
  93. */
  94. bool fast_switch_possible;
  95. bool fast_switch_enabled;
  96. /*
  97. * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
  98. * governor.
  99. */
  100. bool strict_target;
  101. /*
  102. * Set if inefficient frequencies were found in the frequency table.
  103. * This indicates if the relation flag CPUFREQ_RELATION_E can be
  104. * honored.
  105. */
  106. bool efficiencies_available;
  107. /*
  108. * Preferred average time interval between consecutive invocations of
  109. * the driver to set the frequency for this policy. To be set by the
  110. * scaling driver (0, which is the default, means no preference).
  111. */
  112. unsigned int transition_delay_us;
  113. /*
  114. * Remote DVFS flag (Not added to the driver structure as we don't want
  115. * to access another structure from scheduler hotpath).
  116. *
  117. * Should be set if CPUs can do DVFS on behalf of other CPUs from
  118. * different cpufreq policies.
  119. */
  120. bool dvfs_possible_from_any_cpu;
  121. /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
  122. unsigned int cached_target_freq;
  123. unsigned int cached_resolved_idx;
  124. /* Synchronization for frequency transitions */
  125. bool transition_ongoing; /* Tracks transition status */
  126. spinlock_t transition_lock;
  127. wait_queue_head_t transition_wait;
  128. struct task_struct *transition_task; /* Task which is doing the transition */
  129. /* cpufreq-stats */
  130. struct cpufreq_stats *stats;
  131. /* For cpufreq driver's internal use */
  132. void *driver_data;
  133. /* Pointer to the cooling device if used for thermal mitigation */
  134. struct thermal_cooling_device *cdev;
  135. struct notifier_block nb_min;
  136. struct notifier_block nb_max;
  137. };
  138. /*
  139. * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
  140. * callback for sanitization. That callback is only expected to modify the min
  141. * and max values, if necessary, and specifically it must not update the
  142. * frequency table.
  143. */
  144. struct cpufreq_policy_data {
  145. struct cpufreq_cpuinfo cpuinfo;
  146. struct cpufreq_frequency_table *freq_table;
  147. unsigned int cpu;
  148. unsigned int min; /* in kHz */
  149. unsigned int max; /* in kHz */
  150. };
  151. struct cpufreq_freqs {
  152. struct cpufreq_policy *policy;
  153. unsigned int old;
  154. unsigned int new;
  155. u8 flags; /* flags of cpufreq_driver, see below. */
  156. };
  157. /* Only for ACPI */
  158. #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
  159. #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
  160. #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
  161. #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
  162. #ifdef CONFIG_CPU_FREQ
  163. struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
  164. struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
  165. void cpufreq_cpu_put(struct cpufreq_policy *policy);
  166. #else
  167. static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
  168. {
  169. return NULL;
  170. }
  171. static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
  172. {
  173. return NULL;
  174. }
  175. static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
  176. #endif
  177. static inline bool policy_is_inactive(struct cpufreq_policy *policy)
  178. {
  179. return cpumask_empty(policy->cpus);
  180. }
  181. static inline bool policy_is_shared(struct cpufreq_policy *policy)
  182. {
  183. return cpumask_weight(policy->cpus) > 1;
  184. }
  185. #ifdef CONFIG_CPU_FREQ
  186. unsigned int cpufreq_get(unsigned int cpu);
  187. unsigned int cpufreq_quick_get(unsigned int cpu);
  188. unsigned int cpufreq_quick_get_max(unsigned int cpu);
  189. unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
  190. void disable_cpufreq(void);
  191. u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
  192. struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
  193. void cpufreq_cpu_release(struct cpufreq_policy *policy);
  194. int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
  195. void refresh_frequency_limits(struct cpufreq_policy *policy);
  196. void cpufreq_update_policy(unsigned int cpu);
  197. void cpufreq_update_limits(unsigned int cpu);
  198. bool have_governor_per_policy(void);
  199. bool cpufreq_supports_freq_invariance(void);
  200. struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
  201. void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
  202. void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
  203. #else
  204. static inline unsigned int cpufreq_get(unsigned int cpu)
  205. {
  206. return 0;
  207. }
  208. static inline unsigned int cpufreq_quick_get(unsigned int cpu)
  209. {
  210. return 0;
  211. }
  212. static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
  213. {
  214. return 0;
  215. }
  216. static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
  217. {
  218. return 0;
  219. }
  220. static inline bool cpufreq_supports_freq_invariance(void)
  221. {
  222. return false;
  223. }
  224. static inline void disable_cpufreq(void) { }
  225. #endif
  226. #ifdef CONFIG_CPU_FREQ_STAT
  227. void cpufreq_stats_create_table(struct cpufreq_policy *policy);
  228. void cpufreq_stats_free_table(struct cpufreq_policy *policy);
  229. void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
  230. unsigned int new_freq);
  231. #else
  232. static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
  233. static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
  234. static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
  235. unsigned int new_freq) { }
  236. #endif /* CONFIG_CPU_FREQ_STAT */
  237. /*********************************************************************
  238. * CPUFREQ DRIVER INTERFACE *
  239. *********************************************************************/
  240. #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
  241. #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
  242. #define CPUFREQ_RELATION_C 2 /* closest frequency to target */
  243. /* relation flags */
  244. #define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */
  245. #define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
  246. #define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
  247. #define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
  248. struct freq_attr {
  249. struct attribute attr;
  250. ssize_t (*show)(struct cpufreq_policy *, char *);
  251. ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
  252. };
  253. #define cpufreq_freq_attr_ro(_name) \
  254. static struct freq_attr _name = \
  255. __ATTR(_name, 0444, show_##_name, NULL)
  256. #define cpufreq_freq_attr_ro_perm(_name, _perm) \
  257. static struct freq_attr _name = \
  258. __ATTR(_name, _perm, show_##_name, NULL)
  259. #define cpufreq_freq_attr_rw(_name) \
  260. static struct freq_attr _name = \
  261. __ATTR(_name, 0644, show_##_name, store_##_name)
  262. #define cpufreq_freq_attr_wo(_name) \
  263. static struct freq_attr _name = \
  264. __ATTR(_name, 0200, NULL, store_##_name)
  265. #define define_one_global_ro(_name) \
  266. static struct kobj_attribute _name = \
  267. __ATTR(_name, 0444, show_##_name, NULL)
  268. #define define_one_global_rw(_name) \
  269. static struct kobj_attribute _name = \
  270. __ATTR(_name, 0644, show_##_name, store_##_name)
  271. struct cpufreq_driver {
  272. char name[CPUFREQ_NAME_LEN];
  273. u16 flags;
  274. void *driver_data;
  275. /* needed by all drivers */
  276. int (*init)(struct cpufreq_policy *policy);
  277. int (*verify)(struct cpufreq_policy_data *policy);
  278. /* define one out of two */
  279. int (*setpolicy)(struct cpufreq_policy *policy);
  280. int (*target)(struct cpufreq_policy *policy,
  281. unsigned int target_freq,
  282. unsigned int relation); /* Deprecated */
  283. int (*target_index)(struct cpufreq_policy *policy,
  284. unsigned int index);
  285. unsigned int (*fast_switch)(struct cpufreq_policy *policy,
  286. unsigned int target_freq);
  287. /*
  288. * ->fast_switch() replacement for drivers that use an internal
  289. * representation of performance levels and can pass hints other than
  290. * the target performance level to the hardware.
  291. */
  292. void (*adjust_perf)(unsigned int cpu,
  293. unsigned long min_perf,
  294. unsigned long target_perf,
  295. unsigned long capacity);
  296. /*
  297. * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
  298. * unset.
  299. *
  300. * get_intermediate should return a stable intermediate frequency
  301. * platform wants to switch to and target_intermediate() should set CPU
  302. * to that frequency, before jumping to the frequency corresponding
  303. * to 'index'. Core will take care of sending notifications and driver
  304. * doesn't have to handle them in target_intermediate() or
  305. * target_index().
  306. *
  307. * Drivers can return '0' from get_intermediate() in case they don't
  308. * wish to switch to intermediate frequency for some target frequency.
  309. * In that case core will directly call ->target_index().
  310. */
  311. unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
  312. unsigned int index);
  313. int (*target_intermediate)(struct cpufreq_policy *policy,
  314. unsigned int index);
  315. /* should be defined, if possible */
  316. unsigned int (*get)(unsigned int cpu);
  317. /* Called to update policy limits on firmware notifications. */
  318. void (*update_limits)(unsigned int cpu);
  319. /* optional */
  320. int (*bios_limit)(int cpu, unsigned int *limit);
  321. int (*online)(struct cpufreq_policy *policy);
  322. int (*offline)(struct cpufreq_policy *policy);
  323. int (*exit)(struct cpufreq_policy *policy);
  324. int (*suspend)(struct cpufreq_policy *policy);
  325. int (*resume)(struct cpufreq_policy *policy);
  326. /* Will be called after the driver is fully initialized */
  327. void (*ready)(struct cpufreq_policy *policy);
  328. struct freq_attr **attr;
  329. /* platform specific boost support code */
  330. bool boost_enabled;
  331. int (*set_boost)(struct cpufreq_policy *policy, int state);
  332. /*
  333. * Set by drivers that want to register with the energy model after the
  334. * policy is properly initialized, but before the governor is started.
  335. */
  336. void (*register_em)(struct cpufreq_policy *policy);
  337. };
  338. /* flags */
  339. /*
  340. * Set by drivers that need to update internal upper and lower boundaries along
  341. * with the target frequency and so the core and governors should also invoke
  342. * the diver if the target frequency does not change, but the policy min or max
  343. * may have changed.
  344. */
  345. #define CPUFREQ_NEED_UPDATE_LIMITS BIT(0)
  346. /* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
  347. #define CPUFREQ_CONST_LOOPS BIT(1)
  348. /*
  349. * Set by drivers that want the core to automatically register the cpufreq
  350. * driver as a thermal cooling device.
  351. */
  352. #define CPUFREQ_IS_COOLING_DEV BIT(2)
  353. /*
  354. * This should be set by platforms having multiple clock-domains, i.e.
  355. * supporting multiple policies. With this sysfs directories of governor would
  356. * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
  357. * governor with different tunables for different clusters.
  358. */
  359. #define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3)
  360. /*
  361. * Driver will do POSTCHANGE notifications from outside of their ->target()
  362. * routine and so must set cpufreq_driver->flags with this flag, so that core
  363. * can handle them specially.
  364. */
  365. #define CPUFREQ_ASYNC_NOTIFICATION BIT(4)
  366. /*
  367. * Set by drivers which want cpufreq core to check if CPU is running at a
  368. * frequency present in freq-table exposed by the driver. For these drivers if
  369. * CPU is found running at an out of table freq, we will try to set it to a freq
  370. * from the table. And if that fails, we will stop further boot process by
  371. * issuing a BUG_ON().
  372. */
  373. #define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5)
  374. /*
  375. * Set by drivers to disallow use of governors with "dynamic_switching" flag
  376. * set.
  377. */
  378. #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
  379. int cpufreq_register_driver(struct cpufreq_driver *driver_data);
  380. int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
  381. bool cpufreq_driver_test_flags(u16 flags);
  382. const char *cpufreq_get_current_driver(void);
  383. void *cpufreq_get_driver_data(void);
  384. static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
  385. {
  386. return IS_ENABLED(CONFIG_CPU_THERMAL) &&
  387. (drv->flags & CPUFREQ_IS_COOLING_DEV);
  388. }
  389. static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
  390. unsigned int min,
  391. unsigned int max)
  392. {
  393. if (policy->min < min)
  394. policy->min = min;
  395. if (policy->max < min)
  396. policy->max = min;
  397. if (policy->min > max)
  398. policy->min = max;
  399. if (policy->max > max)
  400. policy->max = max;
  401. if (policy->min > policy->max)
  402. policy->min = policy->max;
  403. return;
  404. }
  405. static inline void
  406. cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
  407. {
  408. cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
  409. policy->cpuinfo.max_freq);
  410. }
  411. #ifdef CONFIG_CPU_FREQ
  412. void cpufreq_suspend(void);
  413. void cpufreq_resume(void);
  414. int cpufreq_generic_suspend(struct cpufreq_policy *policy);
  415. #else
  416. static inline void cpufreq_suspend(void) {}
  417. static inline void cpufreq_resume(void) {}
  418. #endif
  419. /*********************************************************************
  420. * CPUFREQ NOTIFIER INTERFACE *
  421. *********************************************************************/
  422. #define CPUFREQ_TRANSITION_NOTIFIER (0)
  423. #define CPUFREQ_POLICY_NOTIFIER (1)
  424. /* Transition notifiers */
  425. #define CPUFREQ_PRECHANGE (0)
  426. #define CPUFREQ_POSTCHANGE (1)
  427. /* Policy Notifiers */
  428. #define CPUFREQ_CREATE_POLICY (0)
  429. #define CPUFREQ_REMOVE_POLICY (1)
  430. #ifdef CONFIG_CPU_FREQ
  431. int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
  432. int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
  433. void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
  434. struct cpufreq_freqs *freqs);
  435. void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
  436. struct cpufreq_freqs *freqs, int transition_failed);
  437. #else /* CONFIG_CPU_FREQ */
  438. static inline int cpufreq_register_notifier(struct notifier_block *nb,
  439. unsigned int list)
  440. {
  441. return 0;
  442. }
  443. static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
  444. unsigned int list)
  445. {
  446. return 0;
  447. }
  448. #endif /* !CONFIG_CPU_FREQ */
  449. /**
  450. * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
  451. * safe)
  452. * @old: old value
  453. * @div: divisor
  454. * @mult: multiplier
  455. *
  456. *
  457. * new = old * mult / div
  458. */
  459. static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
  460. u_int mult)
  461. {
  462. #if BITS_PER_LONG == 32
  463. u64 result = ((u64) old) * ((u64) mult);
  464. do_div(result, div);
  465. return (unsigned long) result;
  466. #elif BITS_PER_LONG == 64
  467. unsigned long result = old * ((u64) mult);
  468. result /= div;
  469. return result;
  470. #endif
  471. }
  472. /*********************************************************************
  473. * CPUFREQ GOVERNORS *
  474. *********************************************************************/
  475. #define CPUFREQ_POLICY_UNKNOWN (0)
  476. /*
  477. * If (cpufreq_driver->target) exists, the ->governor decides what frequency
  478. * within the limits is used. If (cpufreq_driver->setpolicy> exists, these
  479. * two generic policies are available:
  480. */
  481. #define CPUFREQ_POLICY_POWERSAVE (1)
  482. #define CPUFREQ_POLICY_PERFORMANCE (2)
  483. /*
  484. * The polling frequency depends on the capability of the processor. Default
  485. * polling frequency is 1000 times the transition latency of the processor. The
  486. * ondemand governor will work on any processor with transition latency <= 10ms,
  487. * using appropriate sampling rate.
  488. */
  489. #define LATENCY_MULTIPLIER (1000)
  490. struct cpufreq_governor {
  491. char name[CPUFREQ_NAME_LEN];
  492. int (*init)(struct cpufreq_policy *policy);
  493. void (*exit)(struct cpufreq_policy *policy);
  494. int (*start)(struct cpufreq_policy *policy);
  495. void (*stop)(struct cpufreq_policy *policy);
  496. void (*limits)(struct cpufreq_policy *policy);
  497. ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
  498. char *buf);
  499. int (*store_setspeed) (struct cpufreq_policy *policy,
  500. unsigned int freq);
  501. struct list_head governor_list;
  502. struct module *owner;
  503. u8 flags;
  504. };
  505. /* Governor flags */
  506. /* For governors which change frequency dynamically by themselves */
  507. #define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
  508. /* For governors wanting the target frequency to be set exactly */
  509. #define CPUFREQ_GOV_STRICT_TARGET BIT(1)
  510. /* Pass a target to the cpufreq driver */
  511. unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
  512. unsigned int target_freq);
  513. void cpufreq_driver_adjust_perf(unsigned int cpu,
  514. unsigned long min_perf,
  515. unsigned long target_perf,
  516. unsigned long capacity);
  517. bool cpufreq_driver_has_adjust_perf(void);
  518. int cpufreq_driver_target(struct cpufreq_policy *policy,
  519. unsigned int target_freq,
  520. unsigned int relation);
  521. int __cpufreq_driver_target(struct cpufreq_policy *policy,
  522. unsigned int target_freq,
  523. unsigned int relation);
  524. unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
  525. unsigned int target_freq);
  526. unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
  527. int cpufreq_register_governor(struct cpufreq_governor *governor);
  528. void cpufreq_unregister_governor(struct cpufreq_governor *governor);
  529. int cpufreq_start_governor(struct cpufreq_policy *policy);
  530. void cpufreq_stop_governor(struct cpufreq_policy *policy);
  531. #define cpufreq_governor_init(__governor) \
  532. static int __init __governor##_init(void) \
  533. { \
  534. return cpufreq_register_governor(&__governor); \
  535. } \
  536. core_initcall(__governor##_init)
  537. #define cpufreq_governor_exit(__governor) \
  538. static void __exit __governor##_exit(void) \
  539. { \
  540. return cpufreq_unregister_governor(&__governor); \
  541. } \
  542. module_exit(__governor##_exit)
  543. struct cpufreq_governor *cpufreq_default_governor(void);
  544. struct cpufreq_governor *cpufreq_fallback_governor(void);
  545. static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
  546. {
  547. if (policy->max < policy->cur)
  548. __cpufreq_driver_target(policy, policy->max,
  549. CPUFREQ_RELATION_HE);
  550. else if (policy->min > policy->cur)
  551. __cpufreq_driver_target(policy, policy->min,
  552. CPUFREQ_RELATION_LE);
  553. }
  554. /* Governor attribute set */
  555. struct gov_attr_set {
  556. struct kobject kobj;
  557. struct list_head policy_list;
  558. struct mutex update_lock;
  559. int usage_count;
  560. };
  561. /* sysfs ops for cpufreq governors */
  562. extern const struct sysfs_ops governor_sysfs_ops;
  563. static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
  564. {
  565. return container_of(kobj, struct gov_attr_set, kobj);
  566. }
  567. void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
  568. void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
  569. unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
  570. /* Governor sysfs attribute */
  571. struct governor_attr {
  572. struct attribute attr;
  573. ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
  574. ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
  575. size_t count);
  576. };
  577. /*********************************************************************
  578. * FREQUENCY TABLE HELPERS *
  579. *********************************************************************/
  580. /* Special Values of .frequency field */
  581. #define CPUFREQ_ENTRY_INVALID ~0u
  582. #define CPUFREQ_TABLE_END ~1u
  583. /* Special Values of .flags field */
  584. #define CPUFREQ_BOOST_FREQ (1 << 0)
  585. #define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
  586. struct cpufreq_frequency_table {
  587. unsigned int flags;
  588. unsigned int driver_data; /* driver specific data, not used by core */
  589. unsigned int frequency; /* kHz - doesn't need to be in ascending
  590. * order */
  591. };
  592. #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
  593. int dev_pm_opp_init_cpufreq_table(struct device *dev,
  594. struct cpufreq_frequency_table **table);
  595. void dev_pm_opp_free_cpufreq_table(struct device *dev,
  596. struct cpufreq_frequency_table **table);
  597. #else
  598. static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
  599. struct cpufreq_frequency_table
  600. **table)
  601. {
  602. return -EINVAL;
  603. }
  604. static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
  605. struct cpufreq_frequency_table
  606. **table)
  607. {
  608. }
  609. #endif
  610. /*
  611. * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
  612. * @pos: the cpufreq_frequency_table * to use as a loop cursor.
  613. * @table: the cpufreq_frequency_table * to iterate over.
  614. */
  615. #define cpufreq_for_each_entry(pos, table) \
  616. for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
  617. /*
  618. * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
  619. * with index
  620. * @pos: the cpufreq_frequency_table * to use as a loop cursor.
  621. * @table: the cpufreq_frequency_table * to iterate over.
  622. * @idx: the table entry currently being processed
  623. */
  624. #define cpufreq_for_each_entry_idx(pos, table, idx) \
  625. for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
  626. pos++, idx++)
  627. /*
  628. * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
  629. * excluding CPUFREQ_ENTRY_INVALID frequencies.
  630. * @pos: the cpufreq_frequency_table * to use as a loop cursor.
  631. * @table: the cpufreq_frequency_table * to iterate over.
  632. */
  633. #define cpufreq_for_each_valid_entry(pos, table) \
  634. for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \
  635. if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
  636. continue; \
  637. else
  638. /*
  639. * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
  640. * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
  641. * @pos: the cpufreq_frequency_table * to use as a loop cursor.
  642. * @table: the cpufreq_frequency_table * to iterate over.
  643. * @idx: the table entry currently being processed
  644. */
  645. #define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
  646. cpufreq_for_each_entry_idx(pos, table, idx) \
  647. if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
  648. continue; \
  649. else
/**
 * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
 *	frequency_table excluding CPUFREQ_ENTRY_INVALID and
 *	CPUFREQ_INEFFICIENT_FREQ frequencies.
 * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
 * @table: the &struct cpufreq_frequency_table to iterate over.
 * @idx: the table entry currently being processed.
 * @efficiencies: set to true to only iterate over efficient frequencies.
 */
/* When @efficiencies is false the flags test is dead and every valid entry is visited. */
#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies)	\
	cpufreq_for_each_valid_entry_idx(pos, table, idx)			\
		if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ))	\
			continue;						\
		else
/* Validation/lookup helpers for driver-provided frequency tables. */
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table);
int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
				   struct cpufreq_frequency_table *table);
/* Verify variant operating on the policy's own freq_table. */
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
/* Linear-scan target lookup, used when freq_table_sorted == CPUFREQ_TABLE_UNSORTED. */
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation);
/* NOTE(review): presumably returns the table index of @freq or a negative errno — confirm in freq_table.c. */
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
				      unsigned int freq);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
  675. #ifdef CONFIG_CPU_FREQ
/* Boost-frequency support; real implementations exist only under CONFIG_CPU_FREQ. */
int cpufreq_boost_trigger_state(int state);
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
  680. /* Find lowest freq at or above target in a table in ascending order */
  681. static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
  682. unsigned int target_freq,
  683. bool efficiencies)
  684. {
  685. struct cpufreq_frequency_table *table = policy->freq_table;
  686. struct cpufreq_frequency_table *pos;
  687. unsigned int freq;
  688. int idx, best = -1;
  689. cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
  690. freq = pos->frequency;
  691. if (freq >= target_freq)
  692. return idx;
  693. best = idx;
  694. }
  695. return best;
  696. }
  697. /* Find lowest freq at or above target in a table in descending order */
  698. static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
  699. unsigned int target_freq,
  700. bool efficiencies)
  701. {
  702. struct cpufreq_frequency_table *table = policy->freq_table;
  703. struct cpufreq_frequency_table *pos;
  704. unsigned int freq;
  705. int idx, best = -1;
  706. cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
  707. freq = pos->frequency;
  708. if (freq == target_freq)
  709. return idx;
  710. if (freq > target_freq) {
  711. best = idx;
  712. continue;
  713. }
  714. /* No freq found above target_freq */
  715. if (best == -1)
  716. return idx;
  717. return best;
  718. }
  719. return best;
  720. }
  721. /* Works only on sorted freq-tables */
  722. static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
  723. unsigned int target_freq,
  724. bool efficiencies)
  725. {
  726. target_freq = clamp_val(target_freq, policy->min, policy->max);
  727. if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
  728. return cpufreq_table_find_index_al(policy, target_freq,
  729. efficiencies);
  730. else
  731. return cpufreq_table_find_index_dl(policy, target_freq,
  732. efficiencies);
  733. }
  734. /* Find highest freq at or below target in a table in ascending order */
  735. static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
  736. unsigned int target_freq,
  737. bool efficiencies)
  738. {
  739. struct cpufreq_frequency_table *table = policy->freq_table;
  740. struct cpufreq_frequency_table *pos;
  741. unsigned int freq;
  742. int idx, best = -1;
  743. cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
  744. freq = pos->frequency;
  745. if (freq == target_freq)
  746. return idx;
  747. if (freq < target_freq) {
  748. best = idx;
  749. continue;
  750. }
  751. /* No freq found below target_freq */
  752. if (best == -1)
  753. return idx;
  754. return best;
  755. }
  756. return best;
  757. }
  758. /* Find highest freq at or below target in a table in descending order */
  759. static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
  760. unsigned int target_freq,
  761. bool efficiencies)
  762. {
  763. struct cpufreq_frequency_table *table = policy->freq_table;
  764. struct cpufreq_frequency_table *pos;
  765. unsigned int freq;
  766. int idx, best = -1;
  767. cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
  768. freq = pos->frequency;
  769. if (freq <= target_freq)
  770. return idx;
  771. best = idx;
  772. }
  773. return best;
  774. }
  775. /* Works only on sorted freq-tables */
  776. static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
  777. unsigned int target_freq,
  778. bool efficiencies)
  779. {
  780. target_freq = clamp_val(target_freq, policy->min, policy->max);
  781. if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
  782. return cpufreq_table_find_index_ah(policy, target_freq,
  783. efficiencies);
  784. else
  785. return cpufreq_table_find_index_dh(policy, target_freq,
  786. efficiencies);
  787. }
  788. /* Find closest freq to target in a table in ascending order */
  789. static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
  790. unsigned int target_freq,
  791. bool efficiencies)
  792. {
  793. struct cpufreq_frequency_table *table = policy->freq_table;
  794. struct cpufreq_frequency_table *pos;
  795. unsigned int freq;
  796. int idx, best = -1;
  797. cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
  798. freq = pos->frequency;
  799. if (freq == target_freq)
  800. return idx;
  801. if (freq < target_freq) {
  802. best = idx;
  803. continue;
  804. }
  805. /* No freq found below target_freq */
  806. if (best == -1)
  807. return idx;
  808. /* Choose the closest freq */
  809. if (target_freq - table[best].frequency > freq - target_freq)
  810. return idx;
  811. return best;
  812. }
  813. return best;
  814. }
  815. /* Find closest freq to target in a table in descending order */
  816. static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
  817. unsigned int target_freq,
  818. bool efficiencies)
  819. {
  820. struct cpufreq_frequency_table *table = policy->freq_table;
  821. struct cpufreq_frequency_table *pos;
  822. unsigned int freq;
  823. int idx, best = -1;
  824. cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
  825. freq = pos->frequency;
  826. if (freq == target_freq)
  827. return idx;
  828. if (freq > target_freq) {
  829. best = idx;
  830. continue;
  831. }
  832. /* No freq found above target_freq */
  833. if (best == -1)
  834. return idx;
  835. /* Choose the closest freq */
  836. if (table[best].frequency - target_freq > target_freq - freq)
  837. return idx;
  838. return best;
  839. }
  840. return best;
  841. }
  842. /* Works only on sorted freq-tables */
  843. static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
  844. unsigned int target_freq,
  845. bool efficiencies)
  846. {
  847. target_freq = clamp_val(target_freq, policy->min, policy->max);
  848. if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
  849. return cpufreq_table_find_index_ac(policy, target_freq,
  850. efficiencies);
  851. else
  852. return cpufreq_table_find_index_dc(policy, target_freq,
  853. efficiencies);
  854. }
  855. static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
  856. unsigned int target_freq,
  857. unsigned int relation)
  858. {
  859. bool efficiencies = policy->efficiencies_available &&
  860. (relation & CPUFREQ_RELATION_E);
  861. int idx;
  862. /* cpufreq_table_index_unsorted() has no use for this flag anyway */
  863. relation &= ~CPUFREQ_RELATION_E;
  864. if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
  865. return cpufreq_table_index_unsorted(policy, target_freq,
  866. relation);
  867. retry:
  868. switch (relation) {
  869. case CPUFREQ_RELATION_L:
  870. idx = cpufreq_table_find_index_l(policy, target_freq,
  871. efficiencies);
  872. break;
  873. case CPUFREQ_RELATION_H:
  874. idx = cpufreq_table_find_index_h(policy, target_freq,
  875. efficiencies);
  876. break;
  877. case CPUFREQ_RELATION_C:
  878. idx = cpufreq_table_find_index_c(policy, target_freq,
  879. efficiencies);
  880. break;
  881. default:
  882. WARN_ON_ONCE(1);
  883. return 0;
  884. }
  885. if (idx < 0 && efficiencies) {
  886. efficiencies = false;
  887. goto retry;
  888. }
  889. return idx;
  890. }
  891. static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
  892. {
  893. struct cpufreq_frequency_table *pos;
  894. int count = 0;
  895. if (unlikely(!policy->freq_table))
  896. return 0;
  897. cpufreq_for_each_valid_entry(pos, policy->freq_table)
  898. count++;
  899. return count;
  900. }
  901. /**
  902. * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
  903. * @policy: the &struct cpufreq_policy containing the inefficient frequency
  904. * @frequency: the inefficient frequency
  905. *
  906. * The &struct cpufreq_policy must use a sorted frequency table
  907. *
  908. * Return: %0 on success or a negative errno code
  909. */
  910. static inline int
  911. cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
  912. unsigned int frequency)
  913. {
  914. struct cpufreq_frequency_table *pos;
  915. /* Not supported */
  916. if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
  917. return -EINVAL;
  918. cpufreq_for_each_valid_entry(pos, policy->freq_table) {
  919. if (pos->frequency == frequency) {
  920. pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
  921. policy->efficiencies_available = true;
  922. return 0;
  923. }
  924. }
  925. return -EINVAL;
  926. }
  927. static inline int parse_perf_domain(int cpu, const char *list_name,
  928. const char *cell_name)
  929. {
  930. struct device_node *cpu_np;
  931. struct of_phandle_args args;
  932. int ret;
  933. cpu_np = of_cpu_device_node_get(cpu);
  934. if (!cpu_np)
  935. return -ENODEV;
  936. ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
  937. &args);
  938. if (ret < 0)
  939. return ret;
  940. of_node_put(cpu_np);
  941. return args.args[0];
  942. }
  943. static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
  944. const char *cell_name, struct cpumask *cpumask)
  945. {
  946. int target_idx;
  947. int cpu, ret;
  948. ret = parse_perf_domain(pcpu, list_name, cell_name);
  949. if (ret < 0)
  950. return ret;
  951. target_idx = ret;
  952. cpumask_set_cpu(pcpu, cpumask);
  953. for_each_possible_cpu(cpu) {
  954. if (cpu == pcpu)
  955. continue;
  956. ret = parse_perf_domain(cpu, list_name, cell_name);
  957. if (ret < 0)
  958. continue;
  959. if (target_idx == ret)
  960. cpumask_set_cpu(cpu, cpumask);
  961. }
  962. return target_idx;
  963. }
  964. #else
/* Stubs used when the kernel is built without CONFIG_CPU_FREQ. */
static inline int cpufreq_boost_trigger_state(int state)
{
	/* Nothing to do without cpufreq. */
	return 0;
}
static inline int cpufreq_boost_enabled(void)
{
	/* Boost is never enabled without cpufreq. */
	return 0;
}
static inline int cpufreq_enable_boost_support(void)
{
	/* Boost cannot be enabled without cpufreq. */
	return -EINVAL;
}
static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	return false;
}
static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
			      unsigned int frequency)
{
	/* Inefficiency marking is unsupported without cpufreq. */
	return -EINVAL;
}
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
						     const char *cell_name, struct cpumask *cpumask)
{
	return -EOPNOTSUPP;
}
  992. #endif
  993. extern unsigned int arch_freq_get_on_cpu(int cpu);
  994. #ifndef arch_set_freq_scale
/* Default no-op; architectures override by defining arch_set_freq_scale. */
static __always_inline
void arch_set_freq_scale(const struct cpumask *cpus,
			 unsigned long cur_freq,
			 unsigned long max_freq)
{
}
  1001. #endif
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
/* NOTE(review): presumably validates policy->freq_table and records its sort order — confirm in freq_table.c. */
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *table,
			  unsigned int transition_latency);
/*
 * Register an Energy Model for the policy's related CPUs from OPP data,
 * via dev_pm_opp_of_register_em() on the policy CPU's device.
 */
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
	dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
				  policy->related_cpus);
}
  1016. #endif /* _LINUX_CPUFREQ_H */