hrtimer.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright(C) 2005-2006, Thomas Gleixner <[email protected]>
  4. * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  5. * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
  6. *
  7. * High-resolution kernel timers
  8. *
  9. * In contrast to the low-resolution timeout API, aka timer wheel,
  10. * hrtimers provide finer resolution and accuracy depending on system
  11. * configuration and capabilities.
  12. *
  13. * Started by: Thomas Gleixner and Ingo Molnar
  14. *
  15. * Credits:
  16. * Based on the original timer wheel code
  17. *
  18. * Help, testing, suggestions, bugfixes, improvements were
  19. * provided by:
  20. *
  21. * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
  22. * et al.
  23. */
  24. #include <linux/cpu.h>
  25. #include <linux/export.h>
  26. #include <linux/percpu.h>
  27. #include <linux/hrtimer.h>
  28. #include <linux/notifier.h>
  29. #include <linux/syscalls.h>
  30. #include <linux/interrupt.h>
  31. #include <linux/tick.h>
  32. #include <linux/err.h>
  33. #include <linux/debugobjects.h>
  34. #include <linux/sched/signal.h>
  35. #include <linux/sched/sysctl.h>
  36. #include <linux/sched/rt.h>
  37. #include <linux/sched/deadline.h>
  38. #include <linux/sched/nohz.h>
  39. #include <linux/sched/debug.h>
  40. #include <linux/timer.h>
  41. #include <linux/freezer.h>
  42. #include <linux/compat.h>
  43. #include <linux/uaccess.h>
  44. #include <trace/events/timer.h>
  45. #include "tick-internal.h"
  46. /*
  47. * Masks for selecting the soft and hard context timers from
  48. * cpu_base->active
  49. */
  50. #define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
  51. #define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
  52. #define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
  53. #define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
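/*
 * With the eight clock bases defined below (four hard, four soft),
 * MASK_SHIFT evaluates to 4, so HRTIMER_ACTIVE_HARD is 0x0f,
 * HRTIMER_ACTIVE_SOFT is 0xf0 and HRTIMER_ACTIVE_ALL is 0xff.
 */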
  54. /*
  55. * The timer bases:
  56. *
  57. * There are more clockids than hrtimer bases. Thus, we index
  58. * into the timer bases by the hrtimer_base_type enum. When trying
  59. * to reach a base using a clockid, hrtimer_clockid_to_base()
  60. * is used to convert from clockid to the proper hrtimer_base_type.
  61. */
  62. DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
  63. {
  64. .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
  65. .clock_base =
  66. {
  67. {
  68. .index = HRTIMER_BASE_MONOTONIC,
  69. .clockid = CLOCK_MONOTONIC,
  70. .get_time = &ktime_get,
  71. },
  72. {
  73. .index = HRTIMER_BASE_REALTIME,
  74. .clockid = CLOCK_REALTIME,
  75. .get_time = &ktime_get_real,
  76. },
  77. {
  78. .index = HRTIMER_BASE_BOOTTIME,
  79. .clockid = CLOCK_BOOTTIME,
  80. .get_time = &ktime_get_boottime,
  81. },
  82. {
  83. .index = HRTIMER_BASE_TAI,
  84. .clockid = CLOCK_TAI,
  85. .get_time = &ktime_get_clocktai,
  86. },
  87. {
  88. .index = HRTIMER_BASE_MONOTONIC_SOFT,
  89. .clockid = CLOCK_MONOTONIC,
  90. .get_time = &ktime_get,
  91. },
  92. {
  93. .index = HRTIMER_BASE_REALTIME_SOFT,
  94. .clockid = CLOCK_REALTIME,
  95. .get_time = &ktime_get_real,
  96. },
  97. {
  98. .index = HRTIMER_BASE_BOOTTIME_SOFT,
  99. .clockid = CLOCK_BOOTTIME,
  100. .get_time = &ktime_get_boottime,
  101. },
  102. {
  103. .index = HRTIMER_BASE_TAI_SOFT,
  104. .clockid = CLOCK_TAI,
  105. .get_time = &ktime_get_clocktai,
  106. },
  107. }
  108. };
  109. static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
  110. /* Make sure we catch unsupported clockids */
  111. [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
  112. [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
  113. [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
  114. [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
  115. [CLOCK_TAI] = HRTIMER_BASE_TAI,
  116. };
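/*
 * Self-contained user-space sketch (not part of hrtimer.c) of the same
 * catch-all table pattern used above: every slot defaults to a sentinel
 * via a GCC range initializer and only the supported ids override it,
 * so an unsupported id can be detected and mapped to a fallback. All
 * names and values below are made up for the illustration.
 */
#include <stdio.h>

#define DEMO_MAX_CLOCKS	16
#define DEMO_MAX_BASES	8	/* plays the role of HRTIMER_MAX_CLOCK_BASES */

static const int demo_clock_to_base[DEMO_MAX_CLOCKS] = {
	[0 ... DEMO_MAX_CLOCKS - 1] = DEMO_MAX_BASES,	/* sentinel */
	[0] = 1,					/* supported id 0 -> base 1 */
	[1] = 0,					/* supported id 1 -> base 0 */
};

static int demo_clockid_to_base(int clock_id)
{
	if (clock_id >= 0 && clock_id < DEMO_MAX_CLOCKS) {
		int base = demo_clock_to_base[clock_id];

		if (base != DEMO_MAX_BASES)
			return base;
	}
	fprintf(stderr, "invalid clockid %d, falling back to base 0\n", clock_id);
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", demo_clockid_to_base(0),
	       demo_clockid_to_base(1), demo_clockid_to_base(9));
	return 0;
}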
  117. /*
  118. * Functions and macros which are different for UP/SMP systems are kept in a
  119. * single place
  120. */
  121. #ifdef CONFIG_SMP
  122. /*
  123. * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
  124. * such that hrtimer_callback_running() can unconditionally dereference
  125. * timer->base->cpu_base
  126. */
  127. static struct hrtimer_cpu_base migration_cpu_base = {
  128. .clock_base = { {
  129. .cpu_base = &migration_cpu_base,
  130. .seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
  131. &migration_cpu_base.lock),
  132. }, },
  133. };
  134. #define migration_base migration_cpu_base.clock_base[0]
  135. static inline bool is_migration_base(struct hrtimer_clock_base *base)
  136. {
  137. return base == &migration_base;
  138. }
  139. /*
  140. * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
  141. * means that all timers which are tied to this base via timer->base are
  142. * locked, and the base itself is locked too.
  143. *
  144. * So __run_timers/migrate_timers can safely modify all timers which could
  145. * be found on the lists/queues.
  146. *
  147. * When the timer's base is locked, and the timer removed from list, it is
  148. * possible to set timer->base = &migration_base and drop the lock: the timer
  149. * remains locked.
  150. */
  151. static
  152. struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
  153. unsigned long *flags)
  154. {
  155. struct hrtimer_clock_base *base;
  156. for (;;) {
  157. base = READ_ONCE(timer->base);
  158. if (likely(base != &migration_base)) {
  159. raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
  160. if (likely(base == timer->base))
  161. return base;
  162. /* The timer has migrated to another CPU: */
  163. raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
  164. }
  165. cpu_relax();
  166. }
  167. }
  168. /*
  169. * We do not migrate the timer when it is expiring before the next
  170. * event on the target cpu. When high resolution is enabled, we cannot
  171. * reprogram the target cpu hardware and we would cause it to fire
  172. * late. To keep it simple, we handle the high resolution enabled and
  174. * disabled cases the same way.
  174. *
  175. * Called with cpu_base->lock of target cpu held.
  176. */
  177. static int
  178. hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
  179. {
  180. ktime_t expires;
  181. expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
  182. return expires < new_base->cpu_base->expires_next;
  183. }
  184. static inline
  185. struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
  186. int pinned)
  187. {
  188. #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
  189. if (static_branch_likely(&timers_migration_enabled) && !pinned)
  190. return &per_cpu(hrtimer_bases, get_nohz_timer_target());
  191. #endif
  192. return base;
  193. }
  194. /*
  195. * We switch the timer base to a power-optimized selected CPU target,
  196. * if:
  197. * - NO_HZ_COMMON is enabled
  198. * - timer migration is enabled
  199. * - the timer callback is not running
  200. * - the timer is not the first expiring timer on the new target
  201. *
  202. * If one of the above requirements is not fulfilled we move the timer
  203. * to the current CPU or leave it on the previously assigned CPU if
  204. * the timer callback is currently running.
  205. */
  206. static inline struct hrtimer_clock_base *
  207. switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
  208. int pinned)
  209. {
  210. struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
  211. struct hrtimer_clock_base *new_base;
  212. int basenum = base->index;
  213. this_cpu_base = this_cpu_ptr(&hrtimer_bases);
  214. new_cpu_base = get_target_base(this_cpu_base, pinned);
  215. again:
  216. new_base = &new_cpu_base->clock_base[basenum];
  217. if (base != new_base) {
  218. /*
  219. * We are trying to move timer to new_base.
  220. * However we can't change timer's base while it is running,
  221. * so we keep it on the same CPU. No hassle vs. reprogramming
  222. * the event source in the high resolution case. The softirq
  223. * code will take care of this when the timer function has
  224. * completed. There is no conflict as we hold the lock until
  225. * the timer is enqueued.
  226. */
  227. if (unlikely(hrtimer_callback_running(timer)))
  228. return base;
  229. /* See the comment in lock_hrtimer_base() */
  230. WRITE_ONCE(timer->base, &migration_base);
  231. raw_spin_unlock(&base->cpu_base->lock);
  232. raw_spin_lock(&new_base->cpu_base->lock);
  233. if (new_cpu_base != this_cpu_base &&
  234. hrtimer_check_target(timer, new_base)) {
  235. raw_spin_unlock(&new_base->cpu_base->lock);
  236. raw_spin_lock(&base->cpu_base->lock);
  237. new_cpu_base = this_cpu_base;
  238. WRITE_ONCE(timer->base, base);
  239. goto again;
  240. }
  241. WRITE_ONCE(timer->base, new_base);
  242. } else {
  243. if (new_cpu_base != this_cpu_base &&
  244. hrtimer_check_target(timer, new_base)) {
  245. new_cpu_base = this_cpu_base;
  246. goto again;
  247. }
  248. }
  249. return new_base;
  250. }
  251. #else /* CONFIG_SMP */
  252. static inline bool is_migration_base(struct hrtimer_clock_base *base)
  253. {
  254. return false;
  255. }
  256. static inline struct hrtimer_clock_base *
  257. lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  258. {
  259. struct hrtimer_clock_base *base = timer->base;
  260. raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
  261. return base;
  262. }
  263. # define switch_hrtimer_base(t, b, p) (b)
  264. #endif /* !CONFIG_SMP */
  265. /*
  266. * Functions for the union type storage format of ktime_t which are
  267. * too large for inlining:
  268. */
  269. #if BITS_PER_LONG < 64
  270. /*
  271. * Divide a ktime value by a nanosecond value
  272. */
  273. s64 __ktime_divns(const ktime_t kt, s64 div)
  274. {
  275. int sft = 0;
  276. s64 dclc;
  277. u64 tmp;
  278. dclc = ktime_to_ns(kt);
  279. tmp = dclc < 0 ? -dclc : dclc;
  280. /* Make sure the divisor is less than 2^32: */
  281. while (div >> 32) {
  282. sft++;
  283. div >>= 1;
  284. }
  285. tmp >>= sft;
  286. do_div(tmp, (u32) div);
  287. return dclc < 0 ? -tmp : tmp;
  288. }
  289. EXPORT_SYMBOL_GPL(__ktime_divns);
  290. #endif /* BITS_PER_LONG < 64 */
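/*
 * Self-contained user-space sketch (not part of hrtimer.c) of the
 * division trick above: shift the divisor until it fits into 32 bits
 * (as do_div() requires) and shift the dividend by the same amount,
 * trading a little precision for a cheap 64-by-32 bit division.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t demo_ktime_divns(int64_t kt_ns, int64_t div)
{
	uint64_t tmp = kt_ns < 0 ? -(uint64_t)kt_ns : (uint64_t)kt_ns;
	int sft = 0;

	while (div >> 32) {		/* shrink the divisor below 2^32 */
		sft++;
		div >>= 1;
	}
	tmp >>= sft;			/* scale the dividend the same way */
	tmp /= (uint32_t)div;		/* 64-by-32 bit division */

	return kt_ns < 0 ? -(int64_t)tmp : (int64_t)tmp;
}

int main(void)
{
	/* 10 s worth of nanoseconds split into 3 ms slices -> 3333 */
	printf("%lld\n", (long long)demo_ktime_divns(10000000000LL, 3000000LL));
	return 0;
}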
  291. /*
  292. * Add two ktime values and do a safety check for overflow:
  293. */
  294. ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
  295. {
  296. ktime_t res = ktime_add_unsafe(lhs, rhs);
  297. /*
  298. * We use KTIME_SEC_MAX here, the maximum timeout which we can
  299. * return to user space in a timespec:
  300. */
  301. if (res < 0 || res < lhs || res < rhs)
  302. res = ktime_set(KTIME_SEC_MAX, 0);
  303. return res;
  304. }
  305. EXPORT_SYMBOL_GPL(ktime_add_safe);
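/*
 * Minimal user-space sketch (not kernel code) of the saturating add
 * above: a sum that overflows is clamped to a maximum value instead of
 * wrapping around, which is what ktime_add_safe() does with
 * KTIME_SEC_MAX for timeouts headed back to user space.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t demo_add_safe(int64_t lhs, int64_t rhs)
{
	/* Unsigned addition is well defined even when it wraps. */
	int64_t res = (int64_t)((uint64_t)lhs + (uint64_t)rhs);

	if (res < 0 || res < lhs || res < rhs)
		res = INT64_MAX;	/* stands in for ktime_set(KTIME_SEC_MAX, 0) */
	return res;
}

int main(void)
{
	printf("%lld\n", (long long)demo_add_safe(INT64_MAX - 5, 100));
	return 0;
}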
  306. #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
  307. static const struct debug_obj_descr hrtimer_debug_descr;
  308. static void *hrtimer_debug_hint(void *addr)
  309. {
  310. return ((struct hrtimer *) addr)->function;
  311. }
  312. /*
  313. * fixup_init is called when:
  314. * - an active object is initialized
  315. */
  316. static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
  317. {
  318. struct hrtimer *timer = addr;
  319. switch (state) {
  320. case ODEBUG_STATE_ACTIVE:
  321. hrtimer_cancel(timer);
  322. debug_object_init(timer, &hrtimer_debug_descr);
  323. return true;
  324. default:
  325. return false;
  326. }
  327. }
  328. /*
  329. * fixup_activate is called when:
  330. * - an active object is activated
  331. * - an unknown non-static object is activated
  332. */
  333. static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
  334. {
  335. switch (state) {
  336. case ODEBUG_STATE_ACTIVE:
  337. WARN_ON(1);
  338. fallthrough;
  339. default:
  340. return false;
  341. }
  342. }
  343. /*
  344. * fixup_free is called when:
  345. * - an active object is freed
  346. */
  347. static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
  348. {
  349. struct hrtimer *timer = addr;
  350. switch (state) {
  351. case ODEBUG_STATE_ACTIVE:
  352. hrtimer_cancel(timer);
  353. debug_object_free(timer, &hrtimer_debug_descr);
  354. return true;
  355. default:
  356. return false;
  357. }
  358. }
  359. static const struct debug_obj_descr hrtimer_debug_descr = {
  360. .name = "hrtimer",
  361. .debug_hint = hrtimer_debug_hint,
  362. .fixup_init = hrtimer_fixup_init,
  363. .fixup_activate = hrtimer_fixup_activate,
  364. .fixup_free = hrtimer_fixup_free,
  365. };
  366. static inline void debug_hrtimer_init(struct hrtimer *timer)
  367. {
  368. debug_object_init(timer, &hrtimer_debug_descr);
  369. }
  370. static inline void debug_hrtimer_activate(struct hrtimer *timer,
  371. enum hrtimer_mode mode)
  372. {
  373. debug_object_activate(timer, &hrtimer_debug_descr);
  374. }
  375. static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
  376. {
  377. debug_object_deactivate(timer, &hrtimer_debug_descr);
  378. }
  379. static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
  380. enum hrtimer_mode mode);
  381. void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
  382. enum hrtimer_mode mode)
  383. {
  384. debug_object_init_on_stack(timer, &hrtimer_debug_descr);
  385. __hrtimer_init(timer, clock_id, mode);
  386. }
  387. EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
  388. static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
  389. clockid_t clock_id, enum hrtimer_mode mode);
  390. void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
  391. clockid_t clock_id, enum hrtimer_mode mode)
  392. {
  393. debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
  394. __hrtimer_init_sleeper(sl, clock_id, mode);
  395. }
  396. EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
  397. void destroy_hrtimer_on_stack(struct hrtimer *timer)
  398. {
  399. debug_object_free(timer, &hrtimer_debug_descr);
  400. }
  401. EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
  402. #else
  403. static inline void debug_hrtimer_init(struct hrtimer *timer) { }
  404. static inline void debug_hrtimer_activate(struct hrtimer *timer,
  405. enum hrtimer_mode mode) { }
  406. static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
  407. #endif
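/*
 * Illustrative sketch (not part of hrtimer.c) of how a caller would use
 * the on-stack variants above. Only hrtimer_init_on_stack(),
 * hrtimer_start(), hrtimer_cancel() and destroy_hrtimer_on_stack() are
 * the real interfaces; the callback and the surrounding function are
 * made up for the example.
 */
static enum hrtimer_restart demo_stack_timer_fn(struct hrtimer *t)
{
	/* One-shot: do not rearm. */
	return HRTIMER_NORESTART;
}

static void demo_use_stack_timer(void)
{
	struct hrtimer t;

	hrtimer_init_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t.function = demo_stack_timer_fn;
	hrtimer_start(&t, ms_to_ktime(10), HRTIMER_MODE_REL);

	/* ... wait for or race with whatever the timer guards ... */

	hrtimer_cancel(&t);
	/* Pairs with hrtimer_init_on_stack() for DEBUG_OBJECTS_TIMERS. */
	destroy_hrtimer_on_stack(&t);
}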
  408. static inline void
  409. debug_init(struct hrtimer *timer, clockid_t clockid,
  410. enum hrtimer_mode mode)
  411. {
  412. debug_hrtimer_init(timer);
  413. trace_hrtimer_init(timer, clockid, mode);
  414. }
  415. static inline void debug_activate(struct hrtimer *timer,
  416. enum hrtimer_mode mode)
  417. {
  418. debug_hrtimer_activate(timer, mode);
  419. trace_hrtimer_start(timer, mode);
  420. }
  421. static inline void debug_deactivate(struct hrtimer *timer)
  422. {
  423. debug_hrtimer_deactivate(timer);
  424. trace_hrtimer_cancel(timer);
  425. }
  426. static struct hrtimer_clock_base *
  427. __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
  428. {
  429. unsigned int idx;
  430. if (!*active)
  431. return NULL;
  432. idx = __ffs(*active);
  433. *active &= ~(1U << idx);
  434. return &cpu_base->clock_base[idx];
  435. }
  436. #define for_each_active_base(base, cpu_base, active) \
  437. while ((base = __next_base((cpu_base), &(active))))
  438. static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
  439. const struct hrtimer *exclude,
  440. unsigned int active,
  441. ktime_t expires_next)
  442. {
  443. struct hrtimer_clock_base *base;
  444. ktime_t expires;
  445. for_each_active_base(base, cpu_base, active) {
  446. struct timerqueue_node *next;
  447. struct hrtimer *timer;
  448. next = timerqueue_getnext(&base->active);
  449. timer = container_of(next, struct hrtimer, node);
  450. if (timer == exclude) {
  451. /* Get to the next timer in the queue. */
  452. next = timerqueue_iterate_next(next);
  453. if (!next)
  454. continue;
  455. timer = container_of(next, struct hrtimer, node);
  456. }
  457. expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
  458. if (expires < expires_next) {
  459. expires_next = expires;
  460. /* Skip cpu_base update if a timer is being excluded. */
  461. if (exclude)
  462. continue;
  463. if (timer->is_soft)
  464. cpu_base->softirq_next_timer = timer;
  465. else
  466. cpu_base->next_timer = timer;
  467. }
  468. }
  469. /*
  470. * clock_was_set() might have changed base->offset of any of
  471. * the clock bases so the result might be negative. Fix it up
  472. * to prevent a false positive in clockevents_program_event().
  473. */
  474. if (expires_next < 0)
  475. expires_next = 0;
  476. return expires_next;
  477. }
  478. /*
  479. * Recomputes cpu_base::*next_timer and returns the earliest expires_next
  480. * but does not set cpu_base::*expires_next, that is done by
  481. * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
  482. * cpu_base::*expires_next right away, reprogramming logic would no longer
  483. * work.
  484. *
  485. * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
  486. * those timers will get run whenever the softirq gets handled, at the end of
  487. * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
  488. *
  489. * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
  490. * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
  491. * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
  492. *
  493. * @active_mask must be one of:
  494. * - HRTIMER_ACTIVE_ALL,
  495. * - HRTIMER_ACTIVE_SOFT, or
  496. * - HRTIMER_ACTIVE_HARD.
  497. */
  498. static ktime_t
  499. __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
  500. {
  501. unsigned int active;
  502. struct hrtimer *next_timer = NULL;
  503. ktime_t expires_next = KTIME_MAX;
  504. if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
  505. active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
  506. cpu_base->softirq_next_timer = NULL;
  507. expires_next = __hrtimer_next_event_base(cpu_base, NULL,
  508. active, KTIME_MAX);
  509. next_timer = cpu_base->softirq_next_timer;
  510. }
  511. if (active_mask & HRTIMER_ACTIVE_HARD) {
  512. active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
  513. cpu_base->next_timer = next_timer;
  514. expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
  515. expires_next);
  516. }
  517. return expires_next;
  518. }
  519. static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
  520. {
  521. ktime_t expires_next, soft = KTIME_MAX;
  522. /*
  523. * If the soft interrupt has already been activated, ignore the
  524. * soft bases. They will be handled in the already raised soft
  525. * interrupt.
  526. */
  527. if (!cpu_base->softirq_activated) {
  528. soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
  529. /*
  530. * Update the soft expiry time. clock_settime() might have
  531. * affected it.
  532. */
  533. cpu_base->softirq_expires_next = soft;
  534. }
  535. expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
  536. /*
  537. * If a softirq timer is expiring first, update cpu_base->next_timer
  538. * and program the hardware with the soft expiry time.
  539. */
  540. if (expires_next > soft) {
  541. cpu_base->next_timer = cpu_base->softirq_next_timer;
  542. expires_next = soft;
  543. }
  544. return expires_next;
  545. }
  546. static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
  547. {
  548. ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
  549. ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
  550. ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
  551. ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
  552. offs_real, offs_boot, offs_tai);
  553. base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
  554. base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
  555. base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
  556. return now;
  557. }
  558. /*
  559. * Is the high resolution mode active ?
  560. */
  561. static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
  562. {
  563. return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
  564. cpu_base->hres_active : 0;
  565. }
  566. static inline int hrtimer_hres_active(void)
  567. {
  568. return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
  569. }
  570. static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
  571. struct hrtimer *next_timer,
  572. ktime_t expires_next)
  573. {
  574. cpu_base->expires_next = expires_next;
  575. /*
  576. * If hres is not active, hardware does not have to be
  577. * reprogrammed yet.
  578. *
  579. * If a hang was detected in the last timer interrupt then we
  580. * leave the hang delay active in the hardware. We want the
  581. * system to make progress. That also prevents the following
  582. * scenario:
  583. * T1 expires 50ms from now
  584. * T2 expires 5s from now
  585. *
  586. * T1 is removed, so this code is called and would reprogram
  587. * the hardware to 5s from now. Any hrtimer_start after that
  588. * will not reprogram the hardware due to hang_detected being
  589. * set. So we'd effectively block all timers until the T2 event
  590. * fires.
  591. */
  592. if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
  593. return;
  594. tick_program_event(expires_next, 1);
  595. }
  596. /*
  597. * Reprogram the event source with checking both queues for the
  598. * next event
  599. * Called with interrupts disabled and base->lock held
  600. */
  601. static void
  602. hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
  603. {
  604. ktime_t expires_next;
  605. expires_next = hrtimer_update_next_event(cpu_base);
  606. if (skip_equal && expires_next == cpu_base->expires_next)
  607. return;
  608. __hrtimer_reprogram(cpu_base, cpu_base->next_timer, expires_next);
  609. }
  610. /* High resolution timer related functions */
  611. #ifdef CONFIG_HIGH_RES_TIMERS
  612. /*
  613. * High resolution timer enabled ?
  614. */
  615. static bool hrtimer_hres_enabled __read_mostly = true;
  616. unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
  617. EXPORT_SYMBOL_GPL(hrtimer_resolution);
  618. /*
  619. * Enable / Disable high resolution mode
  620. */
  621. static int __init setup_hrtimer_hres(char *str)
  622. {
  623. return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
  624. }
  625. __setup("highres=", setup_hrtimer_hres);
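/*
 * Usage note: booting with "highres=off" (kstrtobool() also accepts
 * 0/1, y/n, on/off) keeps the system in low resolution mode even when
 * CONFIG_HIGH_RES_TIMERS is built in.
 */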
  626. /*
  627. * hrtimer_high_res_enabled - query, if the highres mode is enabled
  628. */
  629. static inline int hrtimer_is_hres_enabled(void)
  630. {
  631. return hrtimer_hres_enabled;
  632. }
  633. static void retrigger_next_event(void *arg);
  634. /*
  635. * Switch to high resolution mode
  636. */
  637. static void hrtimer_switch_to_hres(void)
  638. {
  639. struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
  640. if (tick_init_highres()) {
  641. pr_warn("Could not switch to high resolution mode on CPU %u\n",
  642. base->cpu);
  643. return;
  644. }
  645. base->hres_active = 1;
  646. hrtimer_resolution = HIGH_RES_NSEC;
  647. tick_setup_sched_timer();
  648. /* "Retrigger" the interrupt to get things going */
  649. retrigger_next_event(NULL);
  650. }
  651. #else
  652. static inline int hrtimer_is_hres_enabled(void) { return 0; }
  653. static inline void hrtimer_switch_to_hres(void) { }
  654. #endif /* CONFIG_HIGH_RES_TIMERS */
  655. /*
  656. * Retrigger next event is called after clock was set with interrupts
  657. * disabled through an SMP function call or directly from low level
  658. * resume code.
  659. *
  660. * This is only invoked when:
  661. * - CONFIG_HIGH_RES_TIMERS is enabled.
  662. * - CONFIG_NO_HZ_COMMON is enabled
  663. *
  664. * For the other cases this function is empty and because the call sites
  665. * are optimized out it vanishes as well, i.e. no need for lots of
  666. * #ifdeffery.
  667. */
  668. static void retrigger_next_event(void *arg)
  669. {
  670. struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
  671. /*
  672. * When high resolution mode or nohz is active, then the offsets of
  673. * CLOCK_REALTIME/TAI/BOOTTIME have to be updated. Otherwise the
  674. * next tick will take care of that.
  675. *
  676. * If high resolution mode is active then the next expiring timer
  677. * must be reevaluated and the clock event device reprogrammed if
  678. * necessary.
  679. *
  680. * In the NOHZ case the update of the offset and the reevaluation
  681. * of the next expiring timer is enough. The return from the SMP
  682. * function call will take care of the reprogramming in case the
  683. * CPU was in a NOHZ idle sleep.
  684. */
  685. if (!__hrtimer_hres_active(base) && !tick_nohz_active)
  686. return;
  687. raw_spin_lock(&base->lock);
  688. hrtimer_update_base(base);
  689. if (__hrtimer_hres_active(base))
  690. hrtimer_force_reprogram(base, 0);
  691. else
  692. hrtimer_update_next_event(base);
  693. raw_spin_unlock(&base->lock);
  694. }
  695. /*
  696. * When a timer is enqueued and expires earlier than the already enqueued
  697. * timers, we have to check, whether it expires earlier than the timer for
  698. * which the clock event device was armed.
  699. *
  700. * Called with interrupts disabled and base->cpu_base.lock held
  701. */
  702. static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
  703. {
  704. struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
  705. struct hrtimer_clock_base *base = timer->base;
  706. ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
  707. WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
  708. /*
  709. * CLOCK_REALTIME timer might be requested with an absolute
  710. * expiry time which is less than base->offset. Set it to 0.
  711. */
  712. if (expires < 0)
  713. expires = 0;
  714. if (timer->is_soft) {
  715. /*
  716. * soft hrtimer could be started on a remote CPU. In this
  717. * case softirq_expires_next needs to be updated on the
  718. * remote CPU. The soft hrtimer will not expire before the
  719. * first hard hrtimer on the remote CPU -
  720. * hrtimer_check_target() prevents this case.
  721. */
  722. struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
  723. if (timer_cpu_base->softirq_activated)
  724. return;
  725. if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
  726. return;
  727. timer_cpu_base->softirq_next_timer = timer;
  728. timer_cpu_base->softirq_expires_next = expires;
  729. if (!ktime_before(expires, timer_cpu_base->expires_next) ||
  730. !reprogram)
  731. return;
  732. }
  733. /*
  734. * If the timer is not on the current cpu, we cannot reprogram
  735. * the other cpus clock event device.
  736. */
  737. if (base->cpu_base != cpu_base)
  738. return;
  739. if (expires >= cpu_base->expires_next)
  740. return;
  741. /*
  742. * If the hrtimer interrupt is running, then it will reevaluate the
  743. * clock bases and reprogram the clock event device.
  744. */
  745. if (cpu_base->in_hrtirq)
  746. return;
  747. cpu_base->next_timer = timer;
  748. __hrtimer_reprogram(cpu_base, timer, expires);
  749. }
  750. static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base,
  751. unsigned int active)
  752. {
  753. struct hrtimer_clock_base *base;
  754. unsigned int seq;
  755. ktime_t expires;
  756. /*
  757. * Update the base offsets unconditionally so the following
  758. * checks whether the SMP function call is required works.
  759. *
  760. * The update is safe even when the remote CPU is in the hrtimer
  761. * interrupt or the hrtimer soft interrupt and expiring affected
  762. * bases. Either it will see the update before handling a base or
  763. * it will see it when it finishes the processing and reevaluates
  764. * the next expiring timer.
  765. */
  766. seq = cpu_base->clock_was_set_seq;
  767. hrtimer_update_base(cpu_base);
  768. /*
  769. * If the sequence did not change over the update then the
  770. * remote CPU already handled it.
  771. */
  772. if (seq == cpu_base->clock_was_set_seq)
  773. return false;
  774. /*
  775. * If the remote CPU is currently handling an hrtimer interrupt, it
  776. * will reevaluate the first expiring timer of all clock bases
  777. * before reprogramming. Nothing to do here.
  778. */
  779. if (cpu_base->in_hrtirq)
  780. return false;
  781. /*
  782. * Walk the affected clock bases and check whether the first expiring
  783. * timer in a clock base is moving ahead of the first expiring timer of
  784. * @cpu_base. If so, the IPI must be invoked because per CPU clock
  785. * event devices cannot be remotely reprogrammed.
  786. */
  787. active &= cpu_base->active_bases;
  788. for_each_active_base(base, cpu_base, active) {
  789. struct timerqueue_node *next;
  790. next = timerqueue_getnext(&base->active);
  791. expires = ktime_sub(next->expires, base->offset);
  792. if (expires < cpu_base->expires_next)
  793. return true;
  794. /* Extra check for softirq clock bases */
  795. if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT)
  796. continue;
  797. if (cpu_base->softirq_activated)
  798. continue;
  799. if (expires < cpu_base->softirq_expires_next)
  800. return true;
  801. }
  802. return false;
  803. }
  804. /*
  805. * Clock was set. This might affect CLOCK_REALTIME, CLOCK_TAI and
  806. * CLOCK_BOOTTIME (for late sleep time injection).
  807. *
  808. * This requires to update the offsets for these clocks
  809. * vs. CLOCK_MONOTONIC. When high resolution timers are enabled, then this
  810. * also requires to eventually reprogram the per CPU clock event devices
  811. * when the change moves an affected timer ahead of the first expiring
  812. * timer on that CPU. Obviously remote per CPU clock event devices cannot
  813. * be reprogrammed. The other reason why an IPI has to be sent is when the
  814. * system is in !HIGH_RES and NOHZ mode. The NOHZ mode updates the offsets
  815. * in the tick, which obviously might be stopped, so this has to bring out
  816. * the remote CPU which might sleep in idle to get this sorted.
  817. */
  818. void clock_was_set(unsigned int bases)
  819. {
  820. struct hrtimer_cpu_base *cpu_base = raw_cpu_ptr(&hrtimer_bases);
  821. cpumask_var_t mask;
  822. int cpu;
  823. if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
  824. goto out_timerfd;
  825. if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
  826. on_each_cpu(retrigger_next_event, NULL, 1);
  827. goto out_timerfd;
  828. }
  829. /* Avoid interrupting CPUs if possible */
  830. cpus_read_lock();
  831. for_each_online_cpu(cpu) {
  832. unsigned long flags;
  833. cpu_base = &per_cpu(hrtimer_bases, cpu);
  834. raw_spin_lock_irqsave(&cpu_base->lock, flags);
  835. if (update_needs_ipi(cpu_base, bases))
  836. cpumask_set_cpu(cpu, mask);
  837. raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
  838. }
  839. preempt_disable();
  840. smp_call_function_many(mask, retrigger_next_event, NULL, 1);
  841. preempt_enable();
  842. cpus_read_unlock();
  843. free_cpumask_var(mask);
  844. out_timerfd:
  845. timerfd_clock_was_set();
  846. }
  847. static void clock_was_set_work(struct work_struct *work)
  848. {
  849. clock_was_set(CLOCK_SET_WALL);
  850. }
  851. static DECLARE_WORK(hrtimer_work, clock_was_set_work);
  852. /*
  853. * Called from timekeeping code to reprogram the hrtimer interrupt device
  854. * on all cpus and to notify timerfd.
  855. */
  856. void clock_was_set_delayed(void)
  857. {
  858. schedule_work(&hrtimer_work);
  859. }
  860. /*
  861. * Called during resume either directly via timekeeping_resume()
  862. * or in the case of s2idle from tick_unfreeze() to ensure that the
  863. * hrtimers are up to date.
  864. */
  865. void hrtimers_resume_local(void)
  866. {
  867. lockdep_assert_irqs_disabled();
  868. /* Retrigger on the local CPU */
  869. retrigger_next_event(NULL);
  870. }
  871. /*
  872. * Counterpart to lock_hrtimer_base above:
  873. */
  874. static inline
  875. void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  876. {
  877. raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
  878. }
  879. /**
  880. * hrtimer_forward - forward the timer expiry
  881. * @timer: hrtimer to forward
  882. * @now: forward past this time
  883. * @interval: the interval to forward
  884. *
  885. * Forward the timer expiry so it will expire in the future.
  886. * Returns the number of overruns.
  887. *
  888. * Can be safely called from the callback function of @timer. If
  889. * called from other contexts @timer must neither be enqueued nor
  890. * running the callback and the caller needs to take care of
  891. * serialization.
  892. *
  893. * Note: This only updates the timer expiry value and does not requeue
  894. * the timer.
  895. */
  896. u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
  897. {
  898. u64 orun = 1;
  899. ktime_t delta;
  900. delta = ktime_sub(now, hrtimer_get_expires(timer));
  901. if (delta < 0)
  902. return 0;
  903. if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
  904. return 0;
  905. if (interval < hrtimer_resolution)
  906. interval = hrtimer_resolution;
  907. if (unlikely(delta >= interval)) {
  908. s64 incr = ktime_to_ns(interval);
  909. orun = ktime_divns(delta, incr);
  910. hrtimer_add_expires_ns(timer, incr * orun);
  911. if (hrtimer_get_expires_tv64(timer) > now)
  912. return orun;
  913. /*
  914. * This (and the ktime_add() below) is the
  915. * correction for exact:
  916. */
  917. orun++;
  918. }
  919. hrtimer_add_expires(timer, interval);
  920. return orun;
  921. }
  922. EXPORT_SYMBOL_GPL(hrtimer_forward);
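/*
 * Illustrative sketch (not part of hrtimer.c): the usual pattern for a
 * periodic timer is to call hrtimer_forward_now() from the callback and
 * return HRTIMER_RESTART, since hrtimer_forward() only moves the expiry
 * and does not requeue by itself. The callback name and the 100 ms
 * period are made up for the example.
 */
static enum hrtimer_restart demo_periodic_fn(struct hrtimer *timer)
{
	/*
	 * Push the expiry past "now" in whole periods. The return value
	 * is the number of overruns; anything above 1 means periods were
	 * missed, e.g. because the callback ran late.
	 */
	u64 overruns = hrtimer_forward_now(timer, ms_to_ktime(100));

	if (overruns > 1)
		pr_debug("demo timer: missed %llu period(s)\n", overruns - 1);

	return HRTIMER_RESTART;
}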
  923. /*
  924. * enqueue_hrtimer - internal function to (re)start a timer
  925. *
  926. * The timer is inserted in expiry order. Insertion into the
  927. * red black tree is O(log(n)). Must hold the base lock.
  928. *
  929. * Returns 1 when the new timer is the leftmost timer in the tree.
  930. */
  931. static int enqueue_hrtimer(struct hrtimer *timer,
  932. struct hrtimer_clock_base *base,
  933. enum hrtimer_mode mode)
  934. {
  935. debug_activate(timer, mode);
  936. base->cpu_base->active_bases |= 1 << base->index;
  937. /* Pairs with the lockless read in hrtimer_is_queued() */
  938. WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
  939. return timerqueue_add(&base->active, &timer->node);
  940. }
  941. /*
  942. * __remove_hrtimer - internal function to remove a timer
  943. *
  944. * Caller must hold the base lock.
  945. *
  946. * High resolution timer mode reprograms the clock event device when the
  947. * timer is the one which expires next. The caller can disable this by setting
  948. * reprogram to zero. This is useful, when the context does a reprogramming
  949. * anyway (e.g. timer interrupt)
  950. */
  951. static void __remove_hrtimer(struct hrtimer *timer,
  952. struct hrtimer_clock_base *base,
  953. u8 newstate, int reprogram)
  954. {
  955. struct hrtimer_cpu_base *cpu_base = base->cpu_base;
  956. u8 state = timer->state;
  957. /* Pairs with the lockless read in hrtimer_is_queued() */
  958. WRITE_ONCE(timer->state, newstate);
  959. if (!(state & HRTIMER_STATE_ENQUEUED))
  960. return;
  961. if (!timerqueue_del(&base->active, &timer->node))
  962. cpu_base->active_bases &= ~(1 << base->index);
  963. /*
  964. * Note: If reprogram is false we do not update
  965. * cpu_base->next_timer. This happens when we remove the first
  966. * timer on a remote cpu. No harm as we never dereference
  967. * cpu_base->next_timer. So the worst that can happen is
  968. * a superfluous call to hrtimer_force_reprogram() on the
  969. * remote cpu later on if the same timer gets enqueued again.
  970. */
  971. if (reprogram && timer == cpu_base->next_timer)
  972. hrtimer_force_reprogram(cpu_base, 1);
  973. }
  974. /*
  975. * remove hrtimer, called with base lock held
  976. */
  977. static inline int
  978. remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
  979. bool restart, bool keep_local)
  980. {
  981. u8 state = timer->state;
  982. if (state & HRTIMER_STATE_ENQUEUED) {
  983. bool reprogram;
  984. /*
  985. * Remove the timer and force reprogramming when high
  986. * resolution mode is active and the timer is on the current
  987. * CPU. If we remove a timer on another CPU, reprogramming is
  988. * skipped. The interrupt event on this CPU is fired and
  989. * reprogramming happens in the interrupt handler. This is a
  990. * rare case and less expensive than a smp call.
  991. */
  992. debug_deactivate(timer);
  993. reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
  994. /*
  995. * If the timer is not restarted then reprogramming is
  996. * required if the timer is local. If it is local and about
  997. * to be restarted, avoid programming it twice (on removal
  998. * and a moment later when it's requeued).
  999. */
  1000. if (!restart)
  1001. state = HRTIMER_STATE_INACTIVE;
  1002. else
  1003. reprogram &= !keep_local;
  1004. __remove_hrtimer(timer, base, state, reprogram);
  1005. return 1;
  1006. }
  1007. return 0;
  1008. }
  1009. static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
  1010. const enum hrtimer_mode mode)
  1011. {
  1012. #ifdef CONFIG_TIME_LOW_RES
  1013. /*
  1014. * CONFIG_TIME_LOW_RES indicates that the system has no way to return
  1015. * granular time values. For relative timers we add hrtimer_resolution
  1016. * (i.e. one jiffie) to prevent short timeouts.
  1017. */
  1018. timer->is_rel = mode & HRTIMER_MODE_REL;
  1019. if (timer->is_rel)
  1020. tim = ktime_add_safe(tim, hrtimer_resolution);
  1021. #endif
  1022. return tim;
  1023. }
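/*
 * In other words: on a CONFIG_TIME_LOW_RES system a relative timer armed
 * for e.g. 100 usec cannot fire before roughly 100 usec plus one
 * hrtimer_resolution period, while absolute expiry times are left alone.
 */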
  1024. static void
  1025. hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
  1026. {
  1027. ktime_t expires;
  1028. /*
  1029. * Find the next SOFT expiration.
  1030. */
  1031. expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
  1032. /*
  1033. * reprogramming needs to be triggered, even if the next soft
  1034. * hrtimer expires at the same time as the next hard
  1035. * hrtimer. cpu_base->softirq_expires_next needs to be updated!
  1036. */
  1037. if (expires == KTIME_MAX)
  1038. return;
  1039. /*
  1040. * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
  1041. * cpu_base->*expires_next is only set by hrtimer_reprogram()
  1042. */
  1043. hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
  1044. }
  1045. static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
  1046. u64 delta_ns, const enum hrtimer_mode mode,
  1047. struct hrtimer_clock_base *base)
  1048. {
  1049. struct hrtimer_clock_base *new_base;
  1050. bool force_local, first;
  1051. /*
  1052. * If the timer is on the local cpu base and is the first expiring
  1053. * timer then this might end up reprogramming the hardware twice
  1054. * (on removal and on enqueue). To avoid that by preventing the
  1055. * reprogram on removal, keep the timer local to the current CPU
  1056. * and enforce reprogramming after it is queued no matter whether
  1057. * it is the new first expiring timer again or not.
  1058. */
  1059. force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
  1060. force_local &= base->cpu_base->next_timer == timer;
  1061. /*
  1062. * Remove an active timer from the queue. In case it is not queued
  1063. * on the current CPU, make sure that remove_hrtimer() updates the
  1064. * remote data correctly.
  1065. *
  1066. * If it's on the current CPU and the first expiring timer, then
  1067. * skip reprogramming, keep the timer local and enforce
  1068. * reprogramming later if it was the first expiring timer. This
  1069. * avoids programming the underlying clock event twice (once at
  1070. * removal and once after enqueue).
  1071. */
  1072. remove_hrtimer(timer, base, true, force_local);
  1073. if (mode & HRTIMER_MODE_REL)
  1074. tim = ktime_add_safe(tim, base->get_time());
  1075. tim = hrtimer_update_lowres(timer, tim, mode);
  1076. hrtimer_set_expires_range_ns(timer, tim, delta_ns);
  1077. /* Switch the timer base, if necessary: */
  1078. if (!force_local) {
  1079. new_base = switch_hrtimer_base(timer, base,
  1080. mode & HRTIMER_MODE_PINNED);
  1081. } else {
  1082. new_base = base;
  1083. }
  1084. first = enqueue_hrtimer(timer, new_base, mode);
  1085. if (!force_local)
  1086. return first;
  1087. /*
  1088. * Timer was forced to stay on the current CPU to avoid
  1089. * reprogramming on removal and enqueue. Force reprogram the
  1090. * hardware by evaluating the new first expiring timer.
  1091. */
  1092. hrtimer_force_reprogram(new_base->cpu_base, 1);
  1093. return 0;
  1094. }
  1095. /**
  1096. * hrtimer_start_range_ns - (re)start an hrtimer
  1097. * @timer: the timer to be added
  1098. * @tim: expiry time
  1099. * @delta_ns: "slack" range for the timer
  1100. * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
  1101. * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
  1102. * softirq based mode is considered for debug purpose only!
  1103. */
  1104. void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
  1105. u64 delta_ns, const enum hrtimer_mode mode)
  1106. {
  1107. struct hrtimer_clock_base *base;
  1108. unsigned long flags;
  1109. /*
  1110. * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
  1111. * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
  1112. * expiry mode because unmarked timers are moved to softirq expiry.
  1113. */
  1114. if (!IS_ENABLED(CONFIG_PREEMPT_RT))
  1115. WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
  1116. else
  1117. WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
  1118. base = lock_hrtimer_base(timer, &flags);
  1119. if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
  1120. hrtimer_reprogram(timer, true);
  1121. unlock_hrtimer_base(timer, &flags);
  1122. }
  1123. EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
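/*
 * Illustrative sketch (not part of hrtimer.c): arming a timer 2 ms out
 * with 500 us of allowed slack. The surrounding function is made up for
 * the example; the timer must already have been initialized with
 * hrtimer_init() and have its callback set.
 */
static void demo_arm_with_slack(struct hrtimer *timer)
{
	hrtimer_start_range_ns(timer, ms_to_ktime(2), 500 * NSEC_PER_USEC,
			       HRTIMER_MODE_REL);
}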
  1124. /**
  1125. * hrtimer_try_to_cancel - try to deactivate a timer
  1126. * @timer: hrtimer to stop
  1127. *
  1128. * Returns:
  1129. *
  1130. * * 0 when the timer was not active
  1131. * * 1 when the timer was active
  1132. * * -1 when the timer is currently executing the callback function and
  1133. * cannot be stopped
  1134. */
  1135. int hrtimer_try_to_cancel(struct hrtimer *timer)
  1136. {
  1137. struct hrtimer_clock_base *base;
  1138. unsigned long flags;
  1139. int ret = -1;
  1140. /*
  1141. * Check lockless first. If the timer is not active (neither
  1142. * enqueued nor running the callback), nothing to do here. The
  1143. * base lock does not serialize against a concurrent enqueue,
  1144. * so we can avoid taking it.
  1145. */
  1146. if (!hrtimer_active(timer))
  1147. return 0;
  1148. base = lock_hrtimer_base(timer, &flags);
  1149. if (!hrtimer_callback_running(timer))
  1150. ret = remove_hrtimer(timer, base, false, false);
  1151. unlock_hrtimer_base(timer, &flags);
  1152. return ret;
  1153. }
  1154. EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
  1155. #ifdef CONFIG_PREEMPT_RT
  1156. static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
  1157. {
  1158. spin_lock_init(&base->softirq_expiry_lock);
  1159. }
  1160. static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
  1161. {
  1162. spin_lock(&base->softirq_expiry_lock);
  1163. }
  1164. static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
  1165. {
  1166. spin_unlock(&base->softirq_expiry_lock);
  1167. }
  1168. /*
  1169. * The counterpart to hrtimer_cancel_wait_running().
  1170. *
  1171. * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
  1172. * the timer callback to finish. Drop expiry_lock and reacquire it. That
  1173. * allows the waiter to acquire the lock and make progress.
  1174. */
  1175. static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
  1176. unsigned long flags)
  1177. {
  1178. if (atomic_read(&cpu_base->timer_waiters)) {
  1179. raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
  1180. spin_unlock(&cpu_base->softirq_expiry_lock);
  1181. spin_lock(&cpu_base->softirq_expiry_lock);
  1182. raw_spin_lock_irq(&cpu_base->lock);
  1183. }
  1184. }
  1185. /*
  1186. * This function is called on PREEMPT_RT kernels when the fast path
  1187. * deletion of a timer failed because the timer callback function was
  1188. * running.
  1189. *
  1190. * This prevents priority inversion: if the soft irq thread is preempted
  1191. * in the middle of a timer callback, then calling hrtimer_cancel() can
  1192. * lead to two issues:
  1193. *
  1194. * - If the caller is on a remote CPU then it has to spin wait for the timer
  1195. * handler to complete. This can result in unbound priority inversion.
  1196. *
  1197. * - If the caller originates from the task which preempted the timer
  1198. * handler on the same CPU, then spin waiting for the timer handler to
  1199. * complete is never going to end.
  1200. */
  1201. void hrtimer_cancel_wait_running(const struct hrtimer *timer)
  1202. {
  1203. /* Lockless read. Prevent the compiler from reloading it below */
  1204. struct hrtimer_clock_base *base = READ_ONCE(timer->base);
  1205. /*
  1206. * Just relax if the timer expires in hard interrupt context or if
  1207. * it is currently on the migration base.
  1208. */
  1209. if (!timer->is_soft || is_migration_base(base)) {
  1210. cpu_relax();
  1211. return;
  1212. }
  1213. /*
  1214. * Mark the base as contended and grab the expiry lock, which is
  1215. * held by the softirq across the timer callback. Drop the lock
  1216. * immediately so the softirq can expire the next timer. In theory
  1217. * the timer could already be running again, but that's more than
  1218. * unlikely and just causes another wait loop.
  1219. */
  1220. atomic_inc(&base->cpu_base->timer_waiters);
  1221. spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
  1222. atomic_dec(&base->cpu_base->timer_waiters);
  1223. spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
  1224. }
  1225. #else
  1226. static inline void
  1227. hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
  1228. static inline void
  1229. hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
  1230. static inline void
  1231. hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
  1232. static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
  1233. unsigned long flags) { }
  1234. #endif
  1235. /**
  1236. * hrtimer_cancel - cancel a timer and wait for the handler to finish.
  1237. * @timer: the timer to be cancelled
  1238. *
  1239. * Returns:
  1240. * 0 when the timer was not active
  1241. * 1 when the timer was active
  1242. */
  1243. int hrtimer_cancel(struct hrtimer *timer)
  1244. {
  1245. int ret;
  1246. do {
  1247. ret = hrtimer_try_to_cancel(timer);
  1248. if (ret < 0)
  1249. hrtimer_cancel_wait_running(timer);
  1250. } while (ret < 0);
  1251. return ret;
  1252. }
  1253. EXPORT_SYMBOL_GPL(hrtimer_cancel);
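/*
 * Illustrative sketch (not part of hrtimer.c): in contexts that must not
 * wait for a running callback (e.g. while holding a lock the callback
 * also takes), use hrtimer_try_to_cancel() and handle the "still
 * running" case yourself; hrtimer_cancel() above performs that retry
 * loop for you and may busy-wait or block. The helper below is made up
 * for the example.
 */
static bool demo_try_stop(struct hrtimer *timer)
{
	/* >= 0 means the timer is guaranteed not to run anymore. */
	return hrtimer_try_to_cancel(timer) >= 0;
}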
  1254. /**
  1255. * __hrtimer_get_remaining - get remaining time for the timer
  1256. * @timer: the timer to read
  1257. * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
  1258. */
  1259. ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
  1260. {
  1261. unsigned long flags;
  1262. ktime_t rem;
  1263. lock_hrtimer_base(timer, &flags);
  1264. if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
  1265. rem = hrtimer_expires_remaining_adjusted(timer);
  1266. else
  1267. rem = hrtimer_expires_remaining(timer);
  1268. unlock_hrtimer_base(timer, &flags);
  1269. return rem;
  1270. }
  1271. EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!__hrtimer_hres_active(cpu_base))
		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}

/**
 * hrtimer_next_event_without - time until next expiry event w/o one timer
 * @exclude: timer to exclude
 *
 * Returns the next expiry time over all timers except for the @exclude one or
 * KTIME_MAX if none of them is pending.
 */
u64 hrtimer_next_event_without(const struct hrtimer *exclude)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (__hrtimer_hres_active(cpu_base)) {
		unsigned int active;

		if (!cpu_base->softirq_activated) {
			active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
			expires = __hrtimer_next_event_base(cpu_base, exclude,
							    active, KTIME_MAX);
		}
		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
		expires = __hrtimer_next_event_base(cpu_base, exclude, active,
						    expires);
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
#endif

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	if (likely(clock_id < MAX_CLOCKS)) {
		int base = hrtimer_clock_to_base_table[clock_id];

		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
			return base;
	}
	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
	return HRTIMER_BASE_MONOTONIC;
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
	struct hrtimer_cpu_base *cpu_base;
	int base;

	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context for latency reasons and because the callbacks
	 * can invoke functions which might sleep on RT, e.g. spin_lock().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
		softtimer = true;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	/*
	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they need to become CLOCK_MONOTONIC to
	 * ensure POSIX compliance.
	 */
	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
		clock_id = CLOCK_MONOTONIC;

	base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
	base += hrtimer_clockid_to_base(clock_id);
	timer->is_soft = softtimer;
	timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer: the timer to be initialized
 * @clock_id: the clock to be used
 * @mode: The modes which are relevant for initialization:
 *        HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *        HRTIMER_MODE_REL_SOFT
 *
 *        The PINNED variants of the above can be handed in,
 *        but the PINNED bit is ignored as pinning happens
 *        when the hrtimer is started.
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
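
/*
 * Usage sketch (illustrative only): the common pattern of embedding a
 * hrtimer in a driver structure, initializing it with hrtimer_init(),
 * assigning the callback and arming it. The names my_dev, my_timer_fn and
 * my_dev_start are hypothetical; the 100 ms period is arbitrary.
 *
 *	#include <linux/hrtimer.h>
 *	#include <linux/ktime.h>
 *
 *	struct my_dev {
 *		struct hrtimer timer;
 *	};
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, timer);
 *
 *		// do the periodic work on dev, then re-arm relative to now
 *		hrtimer_forward_now(t, ms_to_ktime(100));
 *		return HRTIMER_RESTART;
 *	}
 *
 *	static void my_dev_start(struct my_dev *dev)
 *	{
 *		hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *		dev->timer.function = my_timer_fn;
 *		hrtimer_start(&dev->timer, ms_to_ktime(100), HRTIMER_MODE_REL);
 *	}
 */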

/*
 * A timer is active when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned int seq;

	do {
		base = READ_ONCE(timer->base);
		seq = raw_read_seqcount_begin(&base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    base->running == timer)
			return true;

	} while (read_seqcount_retry(&base->seq, seq) ||
		 base != READ_ONCE(timer->base));

	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);
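
/*
 * Usage sketch (illustrative only): the result is momentary - a timer can
 * be started or can expire right after the check - so callers typically
 * treat it as a hint. The dev->timer name is hypothetical.
 *
 *	if (hrtimer_active(&dev->timer))
 *		pr_debug("timer is queued, running or being migrated\n");
 */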

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 * - queued: the timer is queued
 * - callback: the timer is being run
 * - post: the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section; if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now,
			  unsigned long flags) __must_hold(&cpu_base->lock)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	bool expires_in_hardirq;
	int restart;

	lockdep_assert_held(&cpu_base->lock);

	debug_deactivate(timer);
	base->running = timer;

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	fn = timer->function;

	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If it's not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
		timer->is_rel = false;

	/*
	 * The timer is marked as running in the CPU base, so it is
	 * protected against migration to a different CPU even if the lock
	 * is dropped.
	 */
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	trace_hrtimer_expire_entry(timer, now);
	expires_in_hardirq = lockdep_hrtimer_enter(timer);

	restart = fn(timer);

	lockdep_hrtimer_exit(expires_in_hardirq);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock_irq(&cpu_base->lock);

	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	WARN_ON_ONCE(base->running != timer);
	base->running = NULL;
}

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
				 unsigned long flags, unsigned int active_mask)
{
	struct hrtimer_clock_base *base;
	unsigned int active = cpu_base->active_bases & active_mask;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *node;
		ktime_t basenow;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals, and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow, flags);
			if (active_mask == HRTIMER_ACTIVE_SOFT)
				hrtimer_sync_wait_running(cpu_base, flags);
		}
	}
}

static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	hrtimer_cpu_base_lock_expiry(cpu_base);
	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);

	cpu_base->softirq_activated = 0;
	hrtimer_update_softirq_timer(cpu_base, true);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	hrtimer_cpu_base_unlock_expiry(cpu_base);
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	unsigned long flags;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event = KTIME_MAX;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next = KTIME_MAX;

	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
		cpu_base->softirq_expires_next = KTIME_MAX;
		cpu_base->softirq_activated = 1;
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	}

	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);

	/* Reevaluate the clock bases for the [soft] next expiry */
	expires_next = hrtimer_update_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting to some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}

/* called with interrupts disabled */
static inline void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = this_cpu_ptr(&tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	if (__hrtimer_hres_active(cpu_base))
		return;

	/*
	 * This _is_ ugly: We have to check periodically whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	now = hrtimer_update_base(cpu_base);

	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
		cpu_base->softirq_expires_next = KTIME_MAX;
		cpu_base->softirq_activated = 1;
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	}

	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

/**
 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
 * @sl: sleeper to be started
 * @mode: timer mode abs/rel
 *
 * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
 * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
 */
void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
				   enum hrtimer_mode mode)
{
	/*
	 * Make the enqueue delivery mode check work on RT. If the sleeper
	 * was initialized for hard interrupt delivery, force the mode bit.
	 * This is a special case for hrtimer_sleepers because
	 * hrtimer_init_sleeper() determines the delivery mode on RT so the
	 * fiddling with this decision is avoided at the call sites.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
		mode |= HRTIMER_MODE_HARD;

	hrtimer_start_expires(&sl->timer, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);

static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode)
{
	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context either for latency reasons or because the
	 * hrtimer callback takes regular spinlocks or invokes other
	 * functions which are not suitable for hard interrupt context on
	 * PREEMPT_RT.
	 *
	 * The hrtimer_sleeper callback is RT compatible in hard interrupt
	 * context, but there is a latency concern: Untrusted userspace can
	 * spawn many threads which arm timers for the same expiry time on
	 * the same CPU. That causes a latency spike due to the wakeup of
	 * a gazillion threads.
	 *
	 * OTOH, privileged real-time user space applications rely on the
	 * low latency of hard interrupt wakeups. If the current task is in
	 * a real-time scheduling class, mark the mode for hard interrupt
	 * expiry.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
			mode |= HRTIMER_MODE_HARD;
	}

	__hrtimer_init(&sl->timer, clock_id, mode);
	sl->timer.function = hrtimer_wakeup;
	sl->task = current;
}

/**
 * hrtimer_init_sleeper - initialize sleeper to the given clock
 * @sl: sleeper to be initialized
 * @clock_id: the clock to be used
 * @mode: timer mode abs/rel
 */
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
			  enum hrtimer_mode mode)
{
	debug_init(&sl->timer, clock_id, mode);
	__hrtimer_init_sleeper(sl, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
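
/*
 * Usage sketch (illustrative only): the canonical sleeper pattern which
 * do_nanosleep() below follows - initialize the sleeper, arm it and sleep
 * until hrtimer_wakeup() clears ->task or a signal arrives. "timeout" is a
 * hypothetical ktime_t expiry value.
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_set_expires(&to.timer, timeout);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	hrtimer_sleeper_start_expires(&to, HRTIMER_MODE_REL);
 *
 *	if (to.task)
 *		schedule();
 *
 *	hrtimer_cancel(&to.timer);
 *	__set_current_state(TASK_RUNNING);
 *	destroy_hrtimer_on_stack(&to.timer);
 */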

int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
	switch (restart->nanosleep.type) {
#ifdef CONFIG_COMPAT_32BIT_TIME
	case TT_COMPAT:
		if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
			return -EFAULT;
		break;
#endif
	case TT_NATIVE:
		if (put_timespec64(ts, restart->nanosleep.rmtp))
			return -EFAULT;
		break;
	default:
		BUG();
	}
	return -ERESTART_RESTARTBLOCK;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	struct restart_block *restart;

	do {
		set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		hrtimer_sleeper_start_expires(t, mode);

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	if (!t->task)
		return 0;

	restart = &current->restart_block;
	if (restart->nanosleep.type != TT_NONE) {
		ktime_t rem = hrtimer_expires_remaining(&t->timer);
		struct timespec64 rmt;

		if (rem <= 0)
			return 0;
		rmt = ktime_to_timespec64(rem);

		return nanosleep_copyout(restart, &rmt);
	}
	return -ERESTART_RESTARTBLOCK;
}

static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	int ret;

	hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
				      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
		       const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	u64 slack;

	slack = current->timer_slack_ns;
	if (dl_task(current) || rt_task(current))
		slack = 0;

	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
	ret = do_nanosleep(&t, mode);
	if (ret != -ERESTART_RESTARTBLOCK)
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	restart = &current->restart_block;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
	set_restart_fn(restart, hrtimer_nanosleep_restart);
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

#ifdef CONFIG_64BIT

SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{
	struct timespec64 tu;

	if (get_timespec64(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;
	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}

#endif

#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{
	struct timespec64 tu;

	if (get_old_timespec32(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;
	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}
#endif

/*
 * Functions related to boot-time initialization:
 */
int hrtimers_prepare_cpu(unsigned int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];

		clock_b->cpu_base = cpu_base;
		seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
		timerqueue_init_head(&clock_b->active);
	}

	cpu_base->cpu = cpu;
	cpu_base->active_bases = 0;
	cpu_base->hres_active = 0;
	cpu_base->hang_detected = 0;
	cpu_base->next_timer = NULL;
	cpu_base->softirq_next_timer = NULL;
	cpu_base->expires_next = KTIME_MAX;
	cpu_base->softirq_expires_next = KTIME_MAX;
	hrtimer_cpu_base_init_expiry_lock(cpu_base);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as ENQUEUED not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
	}
}

int hrtimers_dead_cpu(unsigned int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	/*
	 * This BH disable ensures that raise_softirq_irqoff() does
	 * not wake up ksoftirqd (and acquire the pi-lock) while
	 * holding the cpu_base lock.
	 */
	local_bh_disable();
	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = this_cpu_ptr(&hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	/*
	 * The migration might have changed the first expiring softirq
	 * timer on this CPU. Update it.
	 */
	hrtimer_update_softirq_timer(new_base, false);

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check if we have expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
	local_bh_enable();
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init hrtimers_init(void)
{
	hrtimers_prepare_cpu(smp_processor_id());
	open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @delta: slack in expires timeout (ktime_t)
 * @mode: timer mode
 * @clock_id: timer clock to be used
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
			       const enum hrtimer_mode mode, clockid_t clock_id)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && *expires == 0) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		return -EINTR;
	}

	hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
	hrtimer_sleeper_start_expires(&t, mode);

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock);

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @delta: slack in expires timeout (ktime_t)
 * @mode: timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
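
/*
 * Usage sketch (illustrative only): as noted above, the task state must be
 * set before calling schedule_hrtimeout_range(). The 10 ms timeout and the
 * 1 ms slack below are arbitrary values.
 *
 *	ktime_t t = ktime_set(0, 10 * NSEC_PER_MSEC);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (schedule_hrtimeout_range(&t, NSEC_PER_MSEC, HRTIMER_MODE_REL))
 *		pr_debug("woken early by a signal or wake_up_process()\n");
 */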

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @mode: timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);