arm_arch_timer.c

/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"arm_arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#undef pr_fmt
#define pr_fmt(fmt) "arch_timer: " fmt

#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
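
/*
 * Illustrative note (not part of the driver): the early_param() hook
 * above means the event stream can be toggled from the kernel command
 * line, e.g. booting with "clocksource.arm_arch_timer.evtstrm=0"
 * disables it even when CONFIG_ARM_ARCH_TIMER_EVTSTREAM is set.
 */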
/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({				\
	u64 _old, _new;						\
	int _retries = 200;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely(_old != _new) && _retries);		\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
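
/*
 * Illustrative sketch (not part of the driver): the A008585 workaround
 * above is an instance of a generic pattern; keep re-reading until two
 * back-to-back reads agree, with a bounded retry count so a persistent
 * failure still terminates. A hypothetical standalone helper might look
 * like this, with raw_read() standing in for the actual accessor:
 */
static inline u64 __maybe_unused example_stable_read(u64 (*raw_read)(void))
{
	u64 old, new;
	int retries = 200;	/* well beyond any observed retry count */

	do {
		old = raw_read();
		new = raw_read();
		retries--;
	} while (unlikely(old != new) && retries);

	WARN_ON_ONCE(!retries);	/* the value may still be unstable */
	return new;
}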
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm that a read is correct is to verify that the
 * second of two back-to-back reads is larger than the first by less than
 * 32, so shift out the lower 5 bits when checking the difference.
 * Theoretically the erratum should not occur more than twice in
 * succession when reading the system counter, but interrupts between the
 * two reads may lead to more consecutive read errors and trigger the
 * warning, so the number of retries is set far beyond the number of
 * iterations the loop has been observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({			\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif
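
/*
 * Illustrative note (not part of the driver): in the two helpers above,
 * (((old ^ new) >> 32) & 1) tests whether bit 32 of the counter changed
 * between the two back-to-back reads. If it did, the counter crossed a
 * 32-bit boundary during the window and the first sample is returned;
 * otherwise the second one is used.
 */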
#ifdef CONFIG_ARM64_ERRATUM_1188873
static u64 notrace arm64_1188873_read_cntvct_el0(void)
{
	return read_sysreg(cntvct_el0);
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
							   struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
							   struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_1188873,
		.desc = "ARM erratum 1188873",
		.read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	/*
	 * Use the locked version, as we're called from the CPU
	 * hotplug framework. Otherwise, we end-up in deadlock-land.
	 */
	static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	if (needs_unstable_timer_counter_workaround()) {
		const struct arch_timer_erratum_workaround *__wa;

		__wa = __this_cpu_read(timer_unstable_counter_workaround);
		if (__wa && wa != __wa)
			pr_warn("Can't enable workaround for %s (clashes with %s)\n",
				wa->desc, __wa->desc);

		if (__wa)
			return;
	}

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})
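
/*
 * Illustrative note (not part of the driver): erratum_handler() is a
 * statement expression. It evaluates to true, and stores the
 * workaround's return value in 'r', only when this CPU has an override
 * for 'fn'; the set_next_event callbacks below use it as:
 *
 *	int ret;
 *	if (erratum_handler(set_next_event_virt, ret, evt, clk))
 *		return ret;
 */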
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

static __always_inline irqreturn_t timer_handler(const int access,
						 struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
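
/*
 * Illustrative note (not part of the driver): timer_handler() masks the
 * timer (ARCH_TIMER_CTRL_IT_MASK) before running the event handler, so
 * the level-sensitive interrupt stays quiet until set_next_event()
 * clears the mask again when programming the next expiry.
 */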
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
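
/*
 * Illustrative note (not part of the driver): TVAL is a signed 32-bit
 * downcounter, so writing 'evt' arms the timer to fire 'evt' ticks from
 * now. The control register is written last, enabling and unmasking the
 * timer only once the deadline is in place.
 */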
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
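
/*
 * Illustrative note (not part of the driver): the min_delta of 0xf and
 * max_delta of 0x7fffffff passed above reflect the hardware; TVAL is a
 * signed 32-bit value, so 0x7fffffff is the largest programmable delta,
 * and the small floor avoids arming deadlines the CPU cannot meet.
 */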
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}
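
/*
 * Illustrative worked example (not part of the driver), assuming a
 * 50 MHz counter and a 10 kHz ARCH_TIMER_EVT_STREAM_FREQ:
 * evt_stream_div = 50000000 / 10000 = 5000 and fls(5000) = 13; bit 11
 * of 5000 is clear, so pos is rounded down to 12. 2^12 = 4096 is indeed
 * the power of two nearest to 5000 (versus 2^13 = 8192).
 */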
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may have already been
	 * disabled, though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
			" and " : "",
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}
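
/*
 * Illustrative note (not part of the driver): with both timer types
 * probed and a 50 MHz rate, the banner above would read something like:
 *
 *	arch_timer: cp15 and mmio timer(s) running at 50.00MHz (virt/phys).
 */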
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

bool arch_timer_evtstrm_available(void)
{
	/*
	 * We might get called from a preemptible context. This is fine
	 * because availability of the event stream should always be the
	 * same for a preemptible context and the context where we might
	 * resume a task.
	 */
	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}

static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}
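
/*
 * Illustrative note (not part of the driver): the hi/lo/hi sequence
 * above is the classic way to sample a 64-bit counter through two 32-bit
 * MMIO registers; if the high word changed while the low word was being
 * read, a carry occurred in between and the whole sample is retried.
 */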
static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

		if (elf_hwcap & HWCAP_EVTSTRM)
			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
	}
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};

static bool __init arch_timer_needs_of_probing(void)
{
	struct device_node *dn;
	bool needs_probing = false;
	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

	/* We have two timers, and both device-tree nodes are probed. */
	if ((arch_timers_present & mask) == mask)
		return false;

	/*
	 * Only one type of timer is probed,
	 * check if we have another type of timer node in device-tree.
	 */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
	else
		dn = of_find_matching_node(NULL, arch_timer_of_match);

	if (dn && of_device_is_available(dn))
		needs_probing = true;

	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}
/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}
static int __init arch_timer_of_init(struct device_node *np)
{
	int i, ret;
	u32 rate;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							  "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	u32 rate;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
		return 0;
	}

	rate = readl_relaxed(base + CNTFRQ);

	iounmap(base);

	return rate;
}

static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		best_frame = frame;
	}

	iounmap(cntctlbase);

	return best_frame;
}

static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	int ret, irq = 0;

	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return -EINVAL;
	}

	if (!request_mem_region(frame->cntbase, frame->size,
				"arch_mem_timer"))
		return -EBUSY;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");
		return -ENXIO;
	}

	ret = arch_timer_mem_register(base, irq);
	if (ret) {
		iounmap(base);
		return ret;
	}

	arch_counter_base = base;
	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

	return 0;
}

static int __init arch_timer_mem_of_init(struct device_node *np)
{
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;
	int ret = -EINVAL;
	u32 rate;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
	if (!timer_mem)
		return -ENOMEM;

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	for_each_available_child_of_node(np, frame_node) {
		u32 n;
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);
			goto out;
		}
		frame = &timer_mem->frame[n];

		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);
			goto out;
		}
		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;
	}

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
			&timer_mem->cntctlbase);
		ret = -EINVAL;
		goto out;
	}

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();
out:
	kfree(timer_mem);
	return ret;
}
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		 arch_timer_mem_of_init);

#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame;
	u32 rate;
	int i;

	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		frame = &timer_mem->frame[i];

		if (!frame->valid)
			continue;

		rate = arch_timer_mem_frame_get_cntfrq(frame);
		if (rate == arch_timer_rate)
			continue;

		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
			&frame->cntbase,
			(unsigned long)rate, (unsigned long)arch_timer_rate);

		return -EINVAL;
	}

	return 0;
}

static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			 GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of features we want.
	 */
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		frame = arch_timer_mem_find_best_frame(timer);
		if (!best_frame)
			best_frame = frame;

		ret = arch_timer_mem_verify_cntfrq(timer);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}

		if (!best_frame) /* implies !frame */
			/*
			 * Only complain about missing suitable frames if we
			 * haven't already found one in a previous iteration.
			 */
			pr_err("Unable to find a suitable frame in timer @ %pa\n",
				&timer->cntctlbase);
	}

	if (best_frame)
		ret = arch_timer_mem_frame_register(best_frame);
out:
	kfree(timers);
	return ret;
}
/* Initialize per-processor generic timer and memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret) {
		pr_err("Failed to init GTDT table.\n");
		return ret;
	}

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	if (!arch_timer_rate) {
		pr_err(FW_BUG "frequency not available.\n");
		return -EINVAL;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif