arm_arch_timer.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 */

#define pr_fmt(fmt)	"arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/clocksource_ids.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/ptp_kvm.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>
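
/*
 * MMIO timer register offsets: CNTTIDR and CNTACR(n) live in the
 * CNTCTLBase frame; the remaining offsets below are within each timer's
 * CNTBase frame (see the Arm ARM's description of the memory-mapped
 * counter and timer module).
 */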
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTPCT_LO	0x00
#define CNTVCT_LO	0x08
#define CNTFRQ		0x10
#define CNTP_CVAL_LO	0x20
#define CNTP_CTL	0x2c
#define CNTV_CVAL_LO	0x30
#define CNTV_CTL	0x3c

/*
 * The minimum amount of time a generic counter is guaranteed to not roll over
 * (40 years)
 */
#define MIN_ROLLOVER_SECS	(40ULL * 365 * 24 * 3600)

static unsigned arch_timers_present __initdata;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

static struct arch_timer *arch_timer_mem __ro_after_init;

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;

static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = {
	[ARCH_TIMER_PHYS_SECURE_PPI]	= "sec-phys",
	[ARCH_TIMER_PHYS_NONSECURE_PPI]	= "phys",
	[ARCH_TIMER_VIRT_PPI]		= "virt",
	[ARCH_TIMER_HYP_PPI]		= "hyp-phys",
	[ARCH_TIMER_HYP_VIRT_PPI]	= "hyp-virt",
};

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;

#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Makes an educated guess at a valid counter width based on the Generic Timer
 * specification. Of note:
 * 1) the system counter is at least 56 bits wide
 * 2) a roll-over time of not less than 40 years
 *
 * See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
 */
static int arch_counter_get_width(void)
{
	u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;
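
	/*
	 * e.g. at a 100 MHz counter rate, min_cycles is
	 * 40 years * 1e8 Hz ~= 1.26e17 cycles, so ilog2(min_cycles - 1) + 1
	 * evaluates to 57, comfortably inside the [56, 64] clamp below.
	 */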
	/* guarantee the returned width is within the valid range */
	return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
}

/*
 * Architected system timer support.
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed((u32)val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_CVAL:
			/*
			 * Not guaranteed to be atomic, so the timer
			 * must be disabled at this point.
			 */
			writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed((u32)val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_CVAL:
			/* Same restriction as above */
			writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
			break;
		default:
			BUILD_BUG();
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		default:
			BUILD_BUG();
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

static notrace u64 arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct_stable();
}

static notrace u64 arch_counter_get_cntpct(void)
{
	return __arch_counter_get_cntpct();
}

static notrace u64 arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct_stable();
}

static notrace u64 arch_counter_get_cntvct(void)
{
	return __arch_counter_get_cntvct();
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.id	= CSID_ARM_ARCH_COUNTER,
	.rating	= 400,
	.read	= arch_counter_read,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm that a read is correct is to verify that the
 * second of two back-to-back reads is larger than the first by less than
 * 32, so clear the lower 5 bits and check whether the difference exceeds
 * that bound. In theory the erratum should not occur more than twice in
 * succession when reading the system counter, but interrupts may lead to
 * more than two read errors and trigger the warning, so the number of
 * retries is set far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})
static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
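/*
 * Erratum 858921 (Cortex-A73): a counter read that races with a roll-over
 * of the low 32 bits can return a wrong value. Read twice: if bit 32
 * differs between the two reads, a roll-over was in flight and the first
 * read is the one to trust.
 */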
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over. Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
#define __sun50i_a64_read_reg(reg) ({					\
	u64 _val;							\
	int _retries = 150;						\
									\
	do {								\
		_val = read_sysreg(reg);				\
		_retries--;						\
	} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);	\
									\
	WARN_ON_ONCE(!_retries);					\
	_val;								\
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

/*
 * Force the inlining of this function so that the register accesses
 * can be themselves correctly inlined.
 */
static __always_inline
void erratum_set_next_event_generic(const int access, unsigned long evt,
				    struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
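
	/*
	 * Compute CVAL from a stable (workaround-filtered) counter read and
	 * program it directly via the sysreg, bypassing the generic accessors.
	 */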
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct_stable();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct_stable();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
						      struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
						      struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
	{
		.match_type = ate_match_dt,
		.id = "allwinner,erratum-unknown1",
		.desc = "Allwinner erratum UNKNOWN1",
		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_1418040,
		.desc = "ARM erratum 1418040",
		.disable_compat_vdso = true,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
		atomic_set(&timer_unstable_counter_workaround_in_use, 1);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
		vdso_default = VDSO_CLOCKMODE_NONE;
	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
		clocksource_counter.vdso_clock_mode = vdso_default;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa, *__wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	__wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (__wa && wa != __wa)
  541. pr_warn("Can't enable workaround for %s (clashes with %s\n)",
  542. wa->desc, __wa->desc);
	if (__wa)
		return;

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	return has_erratum_handler(read_cntvct_el0);
}

static bool arch_timer_counter_has_wa(void)
{
	return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a)	do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa()	({false;})
#define arch_timer_counter_has_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

static __always_inline irqreturn_t timer_handler(const int access,
						 struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
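		/*
		 * Mask the interrupt; it stays masked until set_next_event()
		 * clears ARCH_TIMER_CTRL_IT_MASK when reprogramming the timer.
		 */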
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cnt;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		cnt = __arch_counter_get_cntpct();
	else
		cnt = __arch_counter_get_cntvct();

	arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
{
	u32 cnt_lo, cnt_hi, tmp_hi;
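
	/*
	 * 64-bit read of a memory-mapped counter via two 32-bit halves:
	 * sample hi/lo/hi and retry until the high word is stable across
	 * the low-word read.
	 */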
	do {
		cnt_hi = readl_relaxed(t->base + offset_lo + 4);
		cnt_lo = readl_relaxed(t->base + offset_lo);
		tmp_hi = readl_relaxed(t->base + offset_lo + 4);
	} while (cnt_hi != tmp_hi);

	return ((u64) cnt_hi << 32) | cnt_lo;
}

static __always_inline void set_next_event_mem(const int access, unsigned long evt,
					       struct clock_event_device *clk)
{
	struct arch_timer *timer = to_arch_timer(clk);
	unsigned long ctrl;
	u64 cnt;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);

	/* Timer must be disabled before programming CVAL */
	if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
	}

	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
		cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
	else
		cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
	const struct midr_range broken_cval_midrs[] = {
		/*
		 * XGene-1 implements CVAL in terms of TVAL, meaning
		 * that the maximum timer range is 32bit. Shame on them.
		 *
		 * Note that TVAL is signed, thus has only 31 of its
		 * 32 bits to express magnitude.
		 */
		MIDR_REV_RANGE(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
					      APM_CPU_PART_XGENE),
			       APM_CPU_VAR_POTENZA, 0x0, 0xf),
		{},
	};

	if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
		pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
		return CLOCKSOURCE_MASK(31);
	}
#endif
	return CLOCKSOURCE_MASK(arch_counter_get_width());
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	u64 max_delta;

	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		typeof(clk->set_next_event) sne;

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			sne = erratum_handler(set_next_event_virt);
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			sne = erratum_handler(set_next_event_phys);
			break;
		default:
			BUG();
		}

		clk->set_next_event = sne;
		max_delta = __arch_timer_check_delta();
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}

		max_delta = CLOCKSOURCE_MASK(56);
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}

static void arch_timer_evtstrm_enable(unsigned int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

#ifdef CONFIG_ARM64
	/* ECV is likely to require a large divider. Use the EVNTIS flag. */
	if (cpus_have_const_cap(ARM64_HAS_ECV) && divider > 15) {
		cntkctl |= ARCH_TIMER_EVT_INTERVAL_SCALE;
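		/*
		 * EVNTIS scales the EVNTI field by 2^8 (ARMv8.6 ECV), which
		 * is why the requested divider can be reduced by 8 below.
		 */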
		divider -= 8;
	}
#endif

	divider = min(divider, 15U);
	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	arch_timer_set_evtstrm_feature();
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, lsb;

	/*
	 * As the event stream can at most be generated at half the frequency
	 * of the counter, use half the frequency when computing the divider.
	 */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;

	/*
	 * Find the closest power of two to the divisor. If the adjacent bit
	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
	 */
	lsb = fls(evt_stream_div) - 1;
	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
		lsb++;
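
	/*
	 * e.g. a 24 MHz counter with the usual 10 kHz event-stream target
	 * gives evt_stream_div = 1200 (0b10010110000): lsb = 10, bit 9 is
	 * clear, so the divider stays at 2^10.
	 */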
	/* enable event stream */
	arch_timer_evtstrm_enable(max(0, lsb));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);
	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may already have been
	 * disabled, though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

static int validate_timer_rate(void)
{
	if (!arch_timer_rate)
		return -EINVAL;

	/* Arch timer frequency < 1MHz can cause trouble */
	WARN_ON(arch_timer_rate < 1000000);

	return 0;
}

/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (validate_timer_rate())
		pr_warn("frequency not available\n");
}

static void __init arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
			" and " : "",
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

bool arch_timer_evtstrm_available(void)
{
	/*
	 * We might get called from a preemptible context. This is fine
	 * because the availability of the event stream is always the same
	 * for a preemptible context and the context where we might resume
	 * a task.
	 */
	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}

static u64 arch_counter_get_cntvct_mem(void)
{
	return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;
	int width;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		u64 (*rd)(void);
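
		/*
		 * Prefer the virtual counter when the physical one must not
		 * (or cannot) be used: either we booted without EL2 access
		 * on arm64, or the virtual PPI is the one in use.
		 */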
		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
			if (arch_timer_counter_has_wa())
				rd = arch_counter_get_cntvct_stable;
			else
				rd = arch_counter_get_cntvct;
		} else {
			if (arch_timer_counter_has_wa())
				rd = arch_counter_get_cntpct_stable;
			else
				rd = arch_counter_get_cntpct;
		}

		arch_timer_read_counter = rd;
		clocksource_counter.vdso_clock_mode = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	width = arch_counter_get_width();
	clocksource_counter.mask = CLOCKSOURCE_MASK(width);
	cyclecounter.mask = CLOCKSOURCE_MASK(width);

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

		if (arch_timer_have_evtstrm_feature())
			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
	}
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;

	arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
	if (!arch_timer_mem)
		return -ENOMEM;

	arch_timer_mem->base = base;
	arch_timer_mem->evt.irq = irq;
	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
		kfree(arch_timer_mem);
		arch_timer_mem = NULL;
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};

static bool __init arch_timer_needs_of_probing(void)
{
	struct device_node *dn;
	bool needs_probing = false;
	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

	/* We have two timers, and both device-tree nodes are probed. */
	if ((arch_timers_present & mask) == mask)
		return false;

	/*
	 * Only one type of timer is probed,
	 * check if we have another type of timer node in device-tree.
	 */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
	else
		dn = of_find_matching_node(NULL, arch_timer_of_match);

	if (dn && of_device_is_available(dn))
		needs_probing = true;

	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}

static void __init arch_timer_populate_kvm_info(void)
{
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
	if (is_kernel_in_hyp_mode())
		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i, irq, ret;
	u32 rate;
	bool has_names;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	has_names = of_property_read_bool(np, "interrupt-names");

	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
		if (has_names)
			irq = of_irq_get_byname(np, arch_timer_ppi_names[i]);
		else
			irq = of_irq_get(np, i);
		if (irq > 0)
			arch_timer_ppi[i] = irq;
	}

	arch_timer_populate_kvm_info();

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							  "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	u32 rate;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
		return 0;
	}

	rate = readl_relaxed(base + CNTFRQ);

	iounmap(base);

	return rate;
}

static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		best_frame = frame;
	}

	iounmap(cntctlbase);

	return best_frame;
}

static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	int ret, irq = 0;

	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return -EINVAL;
	}

	if (!request_mem_region(frame->cntbase, frame->size,
				"arch_mem_timer"))
		return -EBUSY;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");
		return -ENXIO;
	}

	ret = arch_timer_mem_register(base, irq);
	if (ret) {
		iounmap(base);
		return ret;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

	return 0;
}

static int __init arch_timer_mem_of_init(struct device_node *np)
{
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;
	int ret = -EINVAL;
	u32 rate;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
	if (!timer_mem)
		return -ENOMEM;

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	for_each_available_child_of_node(np, frame_node) {
		u32 n;
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);
			goto out;
		}
		frame = &timer_mem->frame[n];

		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);
			goto out;
		}
		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;
	}

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
			&timer_mem->cntctlbase);
		ret = -EINVAL;
		goto out;
	}

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();
out:
	kfree(timer_mem);
	return ret;
}
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		 arch_timer_mem_of_init);

#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame;
	u32 rate;
	int i;

	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		frame = &timer_mem->frame[i];

		if (!frame->valid)
			continue;

		rate = arch_timer_mem_frame_get_cntfrq(frame);
		if (rate == arch_timer_rate)
			continue;

		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
			&frame->cntbase,
			(unsigned long)rate, (unsigned long)arch_timer_rate);

		return -EINVAL;
	}

	return 0;
}

static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			 GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;
	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of features we want.
	 */
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		frame = arch_timer_mem_find_best_frame(timer);
		if (!best_frame)
			best_frame = frame;

		ret = arch_timer_mem_verify_cntfrq(timer);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}

		if (!best_frame) /* implies !frame */
			/*
			 * Only complain about missing suitable frames if we
			 * haven't already found one in a previous iteration.
			 */
			pr_err("Unable to find a suitable frame in timer @ %pa\n",
				&timer->cntctlbase);
	}

	if (best_frame)
		ret = arch_timer_mem_frame_register(best_frame);
out:
	kfree(timers);
	return ret;
}
/* Initialize the per-processor generic timer and the memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret)
		return ret;

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_populate_kvm_info();

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	ret = validate_timer_rate();
	if (ret) {
		pr_err(FW_BUG "frequency not available.\n");
		return ret;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif

int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
				 struct clocksource **cs)
{
	struct arm_smccc_res hvc_res;
	u32 ptp_counter;
	ktime_t ktime;

	if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY))
		return -EOPNOTSUPP;

	if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
		ptp_counter = KVM_PTP_VIRT_COUNTER;
	else
		ptp_counter = KVM_PTP_PHYS_COUNTER;
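
	/*
	 * On success the hypercall below returns the host wall-clock time
	 * in {a0,a1} and the matching counter value in {a2,a3}.
	 */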
	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
			     ptp_counter, &hvc_res);

	if ((int)(hvc_res.a0) < 0)
		return -EOPNOTSUPP;

	ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1;
	*ts = ktime_to_timespec64(ktime);
	if (cycle)
		*cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
	if (cs)
		*cs = &clocksource_counter;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp);