arm_arch_timer.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628
/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  11. #define pr_fmt(fmt) "arm_arch_timer: " fmt
  12. #include <linux/init.h>
  13. #include <linux/kernel.h>
  14. #include <linux/device.h>
  15. #include <linux/smp.h>
  16. #include <linux/cpu.h>
  17. #include <linux/cpu_pm.h>
  18. #include <linux/clockchips.h>
  19. #include <linux/clocksource.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_address.h>
  23. #include <linux/io.h>
  24. #include <linux/slab.h>
  25. #include <linux/sched/clock.h>
  26. #include <linux/sched_clock.h>
  27. #include <linux/acpi.h>
  28. #include <asm/arch_timer.h>
  29. #include <asm/virt.h>
  30. #include <clocksource/arm_arch_timer.h>
  31. #undef pr_fmt
  32. #define pr_fmt(fmt) "arch_timer: " fmt
/* CNTCTLBase frame registers (memory-mapped timer control frame). */
#define CNTTIDR		0x08			/* counter frame ID register */
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))	/* frame <n> has virtual capability */

/* Per-frame access control register and its permission bits. */
#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)	/* read physical counter */
#define CNTACR_RVCT	BIT(1)	/* read virtual counter */
#define CNTACR_RFRQ	BIT(2)	/* read frequency register */
#define CNTACR_RVOFF	BIT(3)	/* read virtual offset */
#define CNTACR_RWVT	BIT(4)	/* read/write virtual timer */
#define CNTACR_RWPT	BIT(5)	/* read/write physical timer */

/* CNTBase per-frame register offsets. */
#define CNTVCT_LO	0x08	/* virtual counter, low word */
#define CNTVCT_HI	0x0c	/* virtual counter, high word */
#define CNTFRQ		0x10	/* counter frequency */
#define CNTP_TVAL	0x28	/* physical timer value */
#define CNTP_CTL	0x2c	/* physical timer control */
#define CNTV_TVAL	0x38	/* virtual timer value */
#define CNTV_CTL	0x3c	/* virtual timer control */
/* Bitmask of probed timer types (ARCH_TIMER_TYPE_*); init-time only. */
static unsigned arch_timers_present __initdata;

/* MMIO base of the frame used for the memory-mapped counter, if any. */
static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;		/* CNTBase frame for MMIO timers */
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;	/* counter frequency in Hz */
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

/* Per-cpu clockevent for the CP15 (system register) timer. */
static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;

/* Whether userspace may read the counter directly through the vDSO. */
static bool vdso_default = true;

/* CPUs on which the event stream is currently enabled. */
static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

/* Parse "clocksource.arm_arch_timer.evtstrm=" from the kernel command line. */
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
  71. /*
  72. * Architected system timer support.
  73. */
  74. static __always_inline
  75. void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
  76. struct clock_event_device *clk)
  77. {
  78. if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
  79. struct arch_timer *timer = to_arch_timer(clk);
  80. switch (reg) {
  81. case ARCH_TIMER_REG_CTRL:
  82. writel_relaxed(val, timer->base + CNTP_CTL);
  83. break;
  84. case ARCH_TIMER_REG_TVAL:
  85. writel_relaxed(val, timer->base + CNTP_TVAL);
  86. break;
  87. }
  88. } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
  89. struct arch_timer *timer = to_arch_timer(clk);
  90. switch (reg) {
  91. case ARCH_TIMER_REG_CTRL:
  92. writel_relaxed(val, timer->base + CNTV_CTL);
  93. break;
  94. case ARCH_TIMER_REG_TVAL:
  95. writel_relaxed(val, timer->base + CNTV_TVAL);
  96. break;
  97. }
  98. } else {
  99. arch_timer_reg_write_cp15(access, reg, val);
  100. }
  101. }
  102. static __always_inline
  103. u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
  104. struct clock_event_device *clk)
  105. {
  106. u32 val;
  107. if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
  108. struct arch_timer *timer = to_arch_timer(clk);
  109. switch (reg) {
  110. case ARCH_TIMER_REG_CTRL:
  111. val = readl_relaxed(timer->base + CNTP_CTL);
  112. break;
  113. case ARCH_TIMER_REG_TVAL:
  114. val = readl_relaxed(timer->base + CNTP_TVAL);
  115. break;
  116. }
  117. } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
  118. struct arch_timer *timer = to_arch_timer(clk);
  119. switch (reg) {
  120. case ARCH_TIMER_REG_CTRL:
  121. val = readl_relaxed(timer->base + CNTV_CTL);
  122. break;
  123. case ARCH_TIMER_REG_TVAL:
  124. val = readl_relaxed(timer->base + CNTV_TVAL);
  125. break;
  126. }
  127. } else {
  128. val = arch_timer_reg_read_cp15(access, reg);
  129. }
  130. return val;
  131. }
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);
/* clocksource ->read callback: forward to the selected counter accessor. */
static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter ->read callback: same accessor, different signature. */
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
/* The architected counter registered as a clocksource (56 usable bits). */
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Cycle counter view of the same hardware, for timecounter users. */
static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};
/* ACPI OEM identification used to match erratum-affected firmware tables. */
struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
/* Re-read the register until two consecutive reads agree. */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verify whether the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than twice read errors, triggering the warning, so setting
 * the number of retries far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

/* Affected platforms, matched against the ACPI table OEM fields. */
static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif
  262. #ifdef CONFIG_ARM64_ERRATUM_858921
  263. static u64 notrace arm64_858921_read_cntpct_el0(void)
  264. {
  265. u64 old, new;
  266. old = read_sysreg(cntpct_el0);
  267. new = read_sysreg(cntpct_el0);
  268. return (((old ^ new) >> 32) & 1) ? old : new;
  269. }
  270. static u64 notrace arm64_858921_read_cntvct_el0(void)
  271. {
  272. u64 old, new;
  273. old = read_sysreg(cntvct_el0);
  274. new = read_sysreg(cntvct_el0);
  275. return (((old ^ new) >> 32) & 1) ? old : new;
  276. }
  277. #endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over. Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
#define __sun50i_a64_read_reg(reg) ({					\
	u64 _val;							\
	int _retries = 150;						\
									\
	do {								\
		_val = read_sysreg(reg);				\
		_retries--;						\
	} while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries);	\
									\
	WARN_ON_ONCE(!_retries);					\
	_val;								\
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}

/* Derive TVAL from CVAL and a workaround-stabilized counter read. */
static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
{
	return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
}

static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
{
	return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
}
#endif
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/* Per-cpu pointer to the active erratum workaround (NULL if none). */
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

/* Static key: flipped on once any out-of-line workaround is enabled. */
DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

/*
 * Program the next event via CVAL instead of TVAL: on parts with a broken
 * TVAL, compute an absolute compare value from a (workaround-stabilized)
 * counter read and write it to the CVAL sysreg, then unmask and enable.
 */
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
/* Virtual-timer wrapper around the generic CVAL-based set_next_event. */
static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
							   struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

/* Physical-timer wrapper around the generic CVAL-based set_next_event. */
static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
							   struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
  349. static const struct arch_timer_erratum_workaround ool_workarounds[] = {
  350. #ifdef CONFIG_FSL_ERRATUM_A008585
  351. {
  352. .match_type = ate_match_dt,
  353. .id = "fsl,erratum-a008585",
  354. .desc = "Freescale erratum a005858",
  355. .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
  356. .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
  357. .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
  358. .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
  359. .set_next_event_phys = erratum_set_next_event_tval_phys,
  360. .set_next_event_virt = erratum_set_next_event_tval_virt,
  361. },
  362. #endif
  363. #ifdef CONFIG_HISILICON_ERRATUM_161010101
  364. {
  365. .match_type = ate_match_dt,
  366. .id = "hisilicon,erratum-161010101",
  367. .desc = "HiSilicon erratum 161010101",
  368. .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
  369. .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
  370. .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
  371. .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
  372. .set_next_event_phys = erratum_set_next_event_tval_phys,
  373. .set_next_event_virt = erratum_set_next_event_tval_virt,
  374. },
  375. {
  376. .match_type = ate_match_acpi_oem_info,
  377. .id = hisi_161010101_oem_info,
  378. .desc = "HiSilicon erratum 161010101",
  379. .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
  380. .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
  381. .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
  382. .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
  383. .set_next_event_phys = erratum_set_next_event_tval_phys,
  384. .set_next_event_virt = erratum_set_next_event_tval_virt,
  385. },
  386. #endif
  387. #ifdef CONFIG_ARM64_ERRATUM_858921
  388. {
  389. .match_type = ate_match_local_cap_id,
  390. .id = (void *)ARM64_WORKAROUND_858921,
  391. .desc = "ARM erratum 858921",
  392. .read_cntpct_el0 = arm64_858921_read_cntpct_el0,
  393. .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
  394. },
  395. #endif
  396. #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
  397. {
  398. .match_type = ate_match_dt,
  399. .id = "allwinner,erratum-unknown1",
  400. .desc = "Allwinner erratum UNKNOWN1",
  401. .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
  402. .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
  403. .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
  404. .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
  405. .set_next_event_phys = erratum_set_next_event_tval_phys,
  406. .set_next_event_virt = erratum_set_next_event_tval_virt,
  407. },
  408. #endif
  409. };
/* Predicate type: does workaround @wa apply given match argument @arg? */
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

/* DT match: the erratum property (wa->id) is present on the timer node. */
static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

/* Local match: this CPU advertises the capability encoded in wa->id. */
static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}
  425. static
  426. bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
  427. const void *arg)
  428. {
  429. static const struct ate_acpi_oem_info empty_oem_info = {};
  430. const struct ate_acpi_oem_info *info = wa->id;
  431. const struct acpi_table_header *table = arg;
  432. /* Iterate over the ACPI OEM info array, looking for a match */
  433. while (memcmp(info, &empty_oem_info, sizeof(*info))) {
  434. if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
  435. !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
  436. info->oem_revision == table->oem_revision)
  437. return true;
  438. info++;
  439. }
  440. return false;
  441. }
  442. static const struct arch_timer_erratum_workaround *
  443. arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
  444. ate_match_fn_t match_fn,
  445. void *arg)
  446. {
  447. int i;
  448. for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
  449. if (ool_workarounds[i].match_type != type)
  450. continue;
  451. if (match_fn(&ool_workarounds[i], arg))
  452. return &ool_workarounds[i];
  453. }
  454. return NULL;
  455. }
  456. static
  457. void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
  458. bool local)
  459. {
  460. int i;
  461. if (local) {
  462. __this_cpu_write(timer_unstable_counter_workaround, wa);
  463. } else {
  464. for_each_possible_cpu(i)
  465. per_cpu(timer_unstable_counter_workaround, i) = wa;
  466. }
  467. /*
  468. * Use the locked version, as we're called from the CPU
  469. * hotplug framework. Otherwise, we end-up in deadlock-land.
  470. */
  471. static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
  472. /*
  473. * Don't use the vdso fastpath if errata require using the
  474. * out-of-line counter accessor. We may change our mind pretty
  475. * late in the game (with a per-CPU erratum, for example), so
  476. * change both the default value and the vdso itself.
  477. */
  478. if (wa->read_cntvct_el0) {
  479. clocksource_counter.archdata.vdso_direct = false;
  480. vdso_default = false;
  481. }
  482. }
  483. static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
  484. void *arg)
  485. {
  486. const struct arch_timer_erratum_workaround *wa;
  487. ate_match_fn_t match_fn = NULL;
  488. bool local = false;
  489. switch (type) {
  490. case ate_match_dt:
  491. match_fn = arch_timer_check_dt_erratum;
  492. break;
  493. case ate_match_local_cap_id:
  494. match_fn = arch_timer_check_local_cap_erratum;
  495. local = true;
  496. break;
  497. case ate_match_acpi_oem_info:
  498. match_fn = arch_timer_check_acpi_oem_erratum;
  499. break;
  500. default:
  501. WARN_ON(1);
  502. return;
  503. }
  504. wa = arch_timer_iterate_errata(type, match_fn, arg);
  505. if (!wa)
  506. return;
  507. if (needs_unstable_timer_counter_workaround()) {
  508. const struct arch_timer_erratum_workaround *__wa;
  509. __wa = __this_cpu_read(timer_unstable_counter_workaround);
  510. if (__wa && wa != __wa)
  511. pr_warn("Can't enable workaround for %s (clashes with %s\n)",
  512. wa->desc, __wa->desc);
  513. if (__wa)
  514. return;
  515. }
  516. arch_timer_enable_workaround(wa, local);
  517. pr_info("Enabling %s workaround for %s\n",
  518. local ? "local" : "global", wa->desc);
  519. }
/*
 * If this CPU has a workaround providing callback @fn, invoke it with the
 * given arguments, store its return value in @r, and evaluate to true;
 * otherwise evaluate to false (and leave @r untouched).
 */
#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})
/* True if this CPU's active workaround (if any) overrides CNTVCT reads. */
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;
}
#else
/* No OOL workaround support compiled in: stub everything out. */
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
  550. static __always_inline irqreturn_t timer_handler(const int access,
  551. struct clock_event_device *evt)
  552. {
  553. unsigned long ctrl;
  554. ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
  555. if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
  556. ctrl |= ARCH_TIMER_CTRL_IT_MASK;
  557. arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
  558. evt->event_handler(evt);
  559. return IRQ_HANDLED;
  560. }
  561. return IRQ_NONE;
  562. }
/* IRQ handlers: one per access type, all funnelling into timer_handler(). */
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
/* Disable the timer by clearing its enable bit; clockevents shutdown hook. */
static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

/* Per-access-type shutdown wrappers used as set_state_shutdown callbacks. */
static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
/* Program a relative expiry via TVAL, then unmask and enable the timer. */
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

/* Virtual CP15 timer: defer to an erratum handler if one is installed. */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

/* Physical CP15 timer: defer to an erratum handler if one is installed. */
static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

/* Memory-mapped timers have no CP15 errata: program directly. */
static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
/*
 * Populate and register a clockevent device for either the per-cpu CP15
 * timer (type == ARCH_TIMER_TYPE_CP15) or a memory-mapped frame, wiring
 * the shutdown/set_next_event callbacks that match the access type in use.
 */
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		/* Timer may stop in deep idle states on some platforms. */
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		/* Per-cpu errata can only be detected on the running CPU. */
		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	/* Start from a known-disabled state before registering. */
	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
/*
 * Enable the virtual event stream on this CPU with the given divider,
 * advertise it via the ELF hwcaps, and mark the CPU in evtstrm_available.
 */
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
  710. static void arch_timer_configure_evtstream(void)
  711. {
  712. int evt_stream_div, pos;
  713. /* Find the closest power of two to the divisor */
  714. evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
  715. pos = fls(evt_stream_div);
  716. if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
  717. pos--;
  718. /* enable event stream */
  719. arch_timer_evtstrm_enable(min(pos, 15));
  720. }
/*
 * Per-cpu CNTKCTL policy: lock userspace out of the timer registers and
 * physical counter, exposing only the virtual counter — and only when no
 * CNTVCT erratum forces us to trap those accesses instead.
 */
static void arch_counter_set_user_access(void)
{
u32 cntkctl = arch_timer_get_cntkctl();
/* Disable user access to the timers and both counters */
/* Also disable virtual event stream */
cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
| ARCH_TIMER_USR_VT_ACCESS_EN
| ARCH_TIMER_USR_VCT_ACCESS_EN
| ARCH_TIMER_VIRT_EVT_EN
| ARCH_TIMER_USR_PCT_ACCESS_EN);
/*
 * Enable user access to the virtual counter if it doesn't
 * need to be worked around. The vdso may have been already
 * disabled though.
 */
if (arch_timer_this_cpu_has_cntvct_wa())
pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
else
cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
arch_timer_set_cntkctl(cntkctl);
}
  742. static bool arch_timer_has_nonsecure_ppi(void)
  743. {
  744. return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
  745. arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
  746. }
  747. static u32 check_ppi_trigger(int irq)
  748. {
  749. u32 flags = irq_get_trigger_type(irq);
  750. if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
  751. pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
  752. pr_warn("WARNING: Please fix your firmware\n");
  753. flags = IRQF_TRIGGER_LOW;
  754. }
  755. return flags;
  756. }
/*
 * CPU hotplug "starting" callback: bring up the per-cpu arch timer on a
 * CPU coming online — register its clockevent, unmask its PPI(s),
 * apply the userspace-access policy and (optionally) the event stream.
 */
static int arch_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
u32 flags;
/* Register this CPU's sysreg-based clockevent. */
__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
/* Unmask the timer PPI using the trigger type firmware declared. */
flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
if (arch_timer_has_nonsecure_ppi()) {
flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
flags);
}
arch_counter_set_user_access();
if (evtstrm_enable)
arch_timer_configure_evtstream();
return 0;
}
  774. /*
  775. * For historical reasons, when probing with DT we use whichever (non-zero)
  776. * rate was probed first, and don't verify that others match. If the first node
  777. * probed has a clock-frequency property, this overrides the HW register.
  778. */
  779. static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
  780. {
  781. /* Who has more than one independent system counter? */
  782. if (arch_timer_rate)
  783. return;
  784. if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
  785. arch_timer_rate = rate;
  786. /* Check the timer frequency. */
  787. if (arch_timer_rate == 0)
  788. pr_warn("frequency not available\n");
  789. }
/*
 * Log a one-line boot summary of which timer flavours (cp15 and/or mmio)
 * are in use, their common rate, and whether each uses virt or phys mode.
 */
static void arch_timer_banner(unsigned type)
{
pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
" and " : "",
type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
type & ARCH_TIMER_TYPE_CP15 ?
(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
"",
type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
type & ARCH_TIMER_TYPE_MEM ?
arch_timer_mem_use_virtual ? "virt" : "phys" :
"");
}
/* Return the probed system counter frequency in Hz (0 if unknown). */
u32 arch_timer_get_rate(void)
{
return arch_timer_rate;
}
/* Report whether the event stream is usable on the current CPU. */
bool arch_timer_evtstrm_available(void)
{
/*
 * We might get called from a preemptible context. This is fine
 * because availability of the event stream should be always the same
 * for a preemptible context and context where we might resume a task.
 */
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
/*
 * Read the 64-bit virtual counter from the MMIO frame using two 32-bit
 * accesses. The hi/lo/hi sequence retries until the high word is stable
 * across the low-word read, so a carry between the halves cannot yield
 * a torn value.
 */
static u64 arch_counter_get_cntvct_mem(void)
{
u32 vct_lo, vct_hi, tmp_hi;
do {
vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
} while (vct_hi != tmp_hi);
return ((u64) vct_hi << 32) | vct_lo;
}
/* Timer details (virtual IRQ, timecounter) shared with KVM. */
static struct arch_timer_kvm_info arch_timer_kvm_info;
/* Accessor for KVM to fetch the timer info filled in at probe time. */
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
return &arch_timer_kvm_info;
}
/*
 * Register the system counter as clocksource, sched_clock source and
 * KVM timecounter backend, picking the appropriate counter accessor
 * for the probed timer type.
 */
static void __init arch_counter_register(unsigned type)
{
u64 start_count;
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
/* Use the virtual counter unless arm64 booted without HYP access. */
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
clocksource_counter.archdata.vdso_direct = vdso_default;
} else {
/* MMIO-only system: fall back to the memory-mapped reader. */
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
if (!arch_counter_suspend_stop)
clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
/* Mirror the clocksource scaling into the KVM cyclecounter. */
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
timecounter_init(&arch_timer_kvm_info.timecounter,
&cyclecounter, start_count);
/* 56 bits minimum, so we assume worst case rollover */
sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
/* Quiesce this CPU's timer: mask its PPI(s), then shut the clockevent down. */
static void arch_timer_stop(struct clock_event_device *clk)
{
pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
if (arch_timer_has_nonsecure_ppi())
disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
clk->set_state_shutdown(clk);
}
/*
 * CPU hotplug "dying" callback: withdraw the event-stream capability
 * for this CPU and stop its timer before the CPU goes offline.
 */
static int arch_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
arch_timer_stop(clk);
return 0;
}
#ifdef CONFIG_CPU_PM
/* CNTKCTL may be lost across low-power states; keep a per-cpu copy. */
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
/*
 * CPU PM notifier: on entry to a low-power state, save CNTKCTL and mark
 * the event stream unavailable; on exit (or failed entry), restore
 * CNTKCTL and re-advertise the stream if it was enabled at boot.
 */
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
if (action == CPU_PM_ENTER) {
__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
/* Only re-advertise if the stream was enabled (hwcap set at boot). */
if (elf_hwcap & HWCAP_EVTSTRM)
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
}
static struct notifier_block arch_timer_cpu_pm_notifier = {
.notifier_call = arch_timer_cpu_pm_notify,
};
static int __init arch_timer_cpu_pm_init(void)
{
return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
static void __init arch_timer_cpu_pm_deinit(void)
{
WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}
#else
/* No CPU PM support: provide no-op stubs. */
static int __init arch_timer_cpu_pm_init(void)
{
return 0;
}
static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
/*
 * Allocate the per-cpu clockevent devices, request the timer PPI(s)
 * matching the selected mode, hook up CPU PM handling and install the
 * CPU hotplug callbacks (which also configure the boot CPU's timer).
 * Error paths unwind in reverse order via gotos.
 */
static int __init arch_timer_register(void)
{
int err;
int ppi;
arch_timer_evt = alloc_percpu(struct clock_event_device);
if (!arch_timer_evt) {
err = -ENOMEM;
goto out;
}
ppi = arch_timer_ppi[arch_timer_uses_ppi];
switch (arch_timer_uses_ppi) {
case ARCH_TIMER_VIRT_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_virt,
"arch_timer", arch_timer_evt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
/* Secure mode may also need the non-secure PPI; undo on failure. */
if (!err && arch_timer_has_nonsecure_ppi()) {
ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
if (err)
free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
arch_timer_evt);
}
break;
case ARCH_TIMER_HYP_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
break;
default:
BUG();
}
if (err) {
pr_err("can't register interrupt %d (%d)\n", ppi, err);
goto out_free;
}
err = arch_timer_cpu_pm_init();
if (err)
goto out_unreg_notify;
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
"clockevents/arm/arch_timer:starting",
arch_timer_starting_cpu, arch_timer_dying_cpu);
if (err)
goto out_unreg_cpupm;
return 0;
out_unreg_cpupm:
arch_timer_cpu_pm_deinit();
out_unreg_notify:
/* Release whichever PPI(s) were successfully requested above. */
free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
if (arch_timer_has_nonsecure_ppi())
free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
arch_timer_evt);
out_free:
free_percpu(arch_timer_evt);
out:
return err;
}
  971. static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
  972. {
  973. int ret;
  974. irq_handler_t func;
  975. struct arch_timer *t;
  976. t = kzalloc(sizeof(*t), GFP_KERNEL);
  977. if (!t)
  978. return -ENOMEM;
  979. t->base = base;
  980. t->evt.irq = irq;
  981. __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
  982. if (arch_timer_mem_use_virtual)
  983. func = arch_timer_handler_virt_mem;
  984. else
  985. func = arch_timer_handler_phys_mem;
  986. ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
  987. if (ret) {
  988. pr_err("Failed to request mem timer irq\n");
  989. kfree(t);
  990. }
  991. return ret;
  992. }
/* DT compatibles for the sysreg (CP15) per-cpu timer. */
static const struct of_device_id arch_timer_of_match[] __initconst = {
{ .compatible = "arm,armv7-timer", },
{ .compatible = "arm,armv8-timer", },
{},
};
/* DT compatible for the memory-mapped timer. */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
{ .compatible = "arm,armv7-timer-mem", },
{},
};
  1002. static bool __init arch_timer_needs_of_probing(void)
  1003. {
  1004. struct device_node *dn;
  1005. bool needs_probing = false;
  1006. unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
  1007. /* We have two timers, and both device-tree nodes are probed. */
  1008. if ((arch_timers_present & mask) == mask)
  1009. return false;
  1010. /*
  1011. * Only one type of timer is probed,
  1012. * check if we have another type of timer node in device-tree.
  1013. */
  1014. if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
  1015. dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
  1016. else
  1017. dn = of_find_matching_node(NULL, arch_timer_of_match);
  1018. if (dn && of_device_is_available(dn))
  1019. needs_probing = true;
  1020. of_node_put(dn);
  1021. return needs_probing;
  1022. }
/* Final init shared by DT and ACPI probing: banner, counter, arch hook. */
static int __init arch_timer_common_init(void)
{
arch_timer_banner(arch_timers_present);
arch_counter_register(arch_timers_present);
return arch_timer_arch_init();
}
  1029. /**
  1030. * arch_timer_select_ppi() - Select suitable PPI for the current system.
  1031. *
  1032. * If HYP mode is available, we know that the physical timer
  1033. * has been configured to be accessible from PL1. Use it, so
  1034. * that a guest can use the virtual timer instead.
  1035. *
  1036. * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
  1037. * accesses to CNTP_*_EL1 registers are silently redirected to
  1038. * their CNTHP_*_EL2 counterparts, and use a different PPI
  1039. * number.
  1040. *
  1041. * If no interrupt provided for virtual timer, we'll have to
  1042. * stick to the physical timer. It'd better be accessible...
  1043. * For arm64 we never use the secure interrupt.
  1044. *
  1045. * Return: a suitable PPI type for the current system.
  1046. */
  1047. static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
  1048. {
  1049. if (is_kernel_in_hyp_mode())
  1050. return ARCH_TIMER_HYP_PPI;
  1051. if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
  1052. return ARCH_TIMER_VIRT_PPI;
  1053. if (IS_ENABLED(CONFIG_ARM64))
  1054. return ARCH_TIMER_PHYS_NONSECURE_PPI;
  1055. return ARCH_TIMER_PHYS_SECURE_PPI;
  1056. }
/*
 * DT probe for the sysreg (CP15) timer: parse the PPIs and rate, apply
 * workarounds, select the PPI to use, then register the timer. Common
 * init is deferred when an MMIO timer node is still waiting to probe.
 */
static int __init arch_timer_of_init(struct device_node *np)
{
int i, ret;
u32 rate;
if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
pr_warn("multiple nodes in dt, skipping\n");
return 0;
}
arch_timers_present |= ARCH_TIMER_TYPE_CP15;
/* Map all PPIs listed in the node (secure, non-secure, virt, hyp). */
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
rate = arch_timer_get_cntfrq();
arch_timer_of_configure_rate(rate, np);
/* "always-on" means the timer keeps ticking in deep idle states. */
arch_timer_c3stop = !of_property_read_bool(np, "always-on");
/* Check for globally applicable workarounds */
arch_timer_check_ool_workaround(ate_match_dt, np);
/*
 * If we cannot rely on firmware initializing the timer registers then
 * we should use the physical timers instead.
 */
if (IS_ENABLED(CONFIG_ARM) &&
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
else
arch_timer_uses_ppi = arch_timer_select_ppi();
if (!arch_timer_ppi[arch_timer_uses_ppi]) {
pr_err("No interrupt available, giving up\n");
return -EINVAL;
}
/* On some systems, the counter stops ticking when in suspend. */
arch_counter_suspend_stop = of_property_read_bool(np,
"arm,no-tick-in-suspend");
ret = arch_timer_register();
if (ret)
return ret;
/* Defer common init until any pending MMIO timer node has probed. */
if (arch_timer_needs_of_probing())
return 0;
return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
  1099. static u32 __init
  1100. arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
  1101. {
  1102. void __iomem *base;
  1103. u32 rate;
  1104. base = ioremap(frame->cntbase, frame->size);
  1105. if (!base) {
  1106. pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
  1107. return 0;
  1108. }
  1109. rate = readl_relaxed(base + CNTFRQ);
  1110. iounmap(base);
  1111. return rate;
  1112. }
/*
 * Pick the most capable frame of an MMIO timer. A virtual-capable frame
 * wins immediately (first match); otherwise the LAST physical-capable
 * frame scanned is kept. Returns NULL when no usable frame exists or
 * CNTCTLBase cannot be mapped.
 */
static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
struct arch_timer_mem_frame *frame, *best_frame = NULL;
void __iomem *cntctlbase;
u32 cnttidr;
int i;
cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
if (!cntctlbase) {
pr_err("Can't map CNTCTLBase @ %pa\n",
&timer_mem->cntctlbase);
return NULL;
}
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
/*
 * Try to find a virtual capable frame. Otherwise fall back to a
 * physical capable frame.
 */
for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
frame = &timer_mem->frame[i];
if (!frame->valid)
continue;
/* Try enabling everything, and see what sticks */
writel_relaxed(cntacr, cntctlbase + CNTACR(i));
cntacr = readl_relaxed(cntctlbase + CNTACR(i));
/* Virtual timer+counter access granted: take this frame and stop. */
if ((cnttidr & CNTTIDR_VIRT(i)) &&
!(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
best_frame = frame;
arch_timer_mem_use_virtual = true;
break;
}
/* Skip frames that lack physical timer+counter access. */
if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
continue;
best_frame = frame;
}
iounmap(cntctlbase);
return best_frame;
}
  1153. static int __init
  1154. arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
  1155. {
  1156. void __iomem *base;
  1157. int ret, irq = 0;
  1158. if (arch_timer_mem_use_virtual)
  1159. irq = frame->virt_irq;
  1160. else
  1161. irq = frame->phys_irq;
  1162. if (!irq) {
  1163. pr_err("Frame missing %s irq.\n",
  1164. arch_timer_mem_use_virtual ? "virt" : "phys");
  1165. return -EINVAL;
  1166. }
  1167. if (!request_mem_region(frame->cntbase, frame->size,
  1168. "arch_mem_timer"))
  1169. return -EBUSY;
  1170. base = ioremap(frame->cntbase, frame->size);
  1171. if (!base) {
  1172. pr_err("Can't map frame's registers\n");
  1173. return -ENXIO;
  1174. }
  1175. ret = arch_timer_mem_register(base, irq);
  1176. if (ret) {
  1177. iounmap(base);
  1178. return ret;
  1179. }
  1180. arch_counter_base = base;
  1181. arch_timers_present |= ARCH_TIMER_TYPE_MEM;
  1182. return 0;
  1183. }
  1184. static int __init arch_timer_mem_of_init(struct device_node *np)
  1185. {
  1186. struct arch_timer_mem *timer_mem;
  1187. struct arch_timer_mem_frame *frame;
  1188. struct device_node *frame_node;
  1189. struct resource res;
  1190. int ret = -EINVAL;
  1191. u32 rate;
  1192. timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
  1193. if (!timer_mem)
  1194. return -ENOMEM;
  1195. if (of_address_to_resource(np, 0, &res))
  1196. goto out;
  1197. timer_mem->cntctlbase = res.start;
  1198. timer_mem->size = resource_size(&res);
  1199. for_each_available_child_of_node(np, frame_node) {
  1200. u32 n;
  1201. struct arch_timer_mem_frame *frame;
  1202. if (of_property_read_u32(frame_node, "frame-number", &n)) {
  1203. pr_err(FW_BUG "Missing frame-number.\n");
  1204. of_node_put(frame_node);
  1205. goto out;
  1206. }
  1207. if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
  1208. pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
  1209. ARCH_TIMER_MEM_MAX_FRAMES - 1);
  1210. of_node_put(frame_node);
  1211. goto out;
  1212. }
  1213. frame = &timer_mem->frame[n];
  1214. if (frame->valid) {
  1215. pr_err(FW_BUG "Duplicated frame-number.\n");
  1216. of_node_put(frame_node);
  1217. goto out;
  1218. }
  1219. if (of_address_to_resource(frame_node, 0, &res)) {
  1220. of_node_put(frame_node);
  1221. goto out;
  1222. }
  1223. frame->cntbase = res.start;
  1224. frame->size = resource_size(&res);
  1225. frame->virt_irq = irq_of_parse_and_map(frame_node,
  1226. ARCH_TIMER_VIRT_SPI);
  1227. frame->phys_irq = irq_of_parse_and_map(frame_node,
  1228. ARCH_TIMER_PHYS_SPI);
  1229. frame->valid = true;
  1230. }
  1231. frame = arch_timer_mem_find_best_frame(timer_mem);
  1232. if (!frame) {
  1233. pr_err("Unable to find a suitable frame in timer @ %pa\n",
  1234. &timer_mem->cntctlbase);
  1235. ret = -EINVAL;
  1236. goto out;
  1237. }
  1238. rate = arch_timer_mem_frame_get_cntfrq(frame);
  1239. arch_timer_of_configure_rate(rate, np);
  1240. ret = arch_timer_mem_frame_register(frame);
  1241. if (!ret && !arch_timer_needs_of_probing())
  1242. ret = arch_timer_common_init();
  1243. out:
  1244. kfree(timer_mem);
  1245. return ret;
  1246. }
  1247. TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
  1248. arch_timer_mem_of_init);
  1249. #ifdef CONFIG_ACPI_GTDT
  1250. static int __init
  1251. arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
  1252. {
  1253. struct arch_timer_mem_frame *frame;
  1254. u32 rate;
  1255. int i;
  1256. for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
  1257. frame = &timer_mem->frame[i];
  1258. if (!frame->valid)
  1259. continue;
  1260. rate = arch_timer_mem_frame_get_cntfrq(frame);
  1261. if (rate == arch_timer_rate)
  1262. continue;
  1263. pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
  1264. &frame->cntbase,
  1265. (unsigned long)rate, (unsigned long)arch_timer_rate);
  1266. return -EINVAL;
  1267. }
  1268. return 0;
  1269. }
/*
 * ACPI/GTDT probe of memory-mapped timers: scan every platform timer,
 * keep the first usable frame found, verify all frames agree on CNTFRQ,
 * and register the chosen frame (if any).
 */
static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
struct arch_timer_mem *timers, *timer;
struct arch_timer_mem_frame *frame, *best_frame = NULL;
int timer_count, i, ret = 0;
timers = kcalloc(platform_timer_count, sizeof(*timers),
GFP_KERNEL);
if (!timers)
return -ENOMEM;
ret = acpi_arch_timer_mem_init(timers, &timer_count);
if (ret || !timer_count)
goto out;
/*
 * While unlikely, it's theoretically possible that none of the frames
 * in a timer expose the combination of feature we want.
 */
for (i = 0; i < timer_count; i++) {
timer = &timers[i];
/* Keep the first usable frame across all timers. */
frame = arch_timer_mem_find_best_frame(timer);
if (!best_frame)
best_frame = frame;
ret = arch_timer_mem_verify_cntfrq(timer);
if (ret) {
pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
goto out;
}
if (!best_frame) /* implies !frame */
/*
 * Only complain about missing suitable frames if we
 * haven't already found one in a previous iteration.
 */
pr_err("Unable to find a suitable frame in timer @ %pa\n",
&timer->cntctlbase);
}
if (best_frame)
ret = arch_timer_mem_frame_register(best_frame);
out:
kfree(timers);
return ret;
}
/* Initialize per-processor generic timer and memory-mapped timer(if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
int ret, platform_timer_count;
if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
pr_warn("already initialized, skipping\n");
return -EINVAL;
}
arch_timers_present |= ARCH_TIMER_TYPE_CP15;
ret = acpi_gtdt_init(table, &platform_timer_count);
if (ret) {
pr_err("Failed to init GTDT table.\n");
return ret;
}
/* Map the PPIs described by the GTDT (no secure PPI under ACPI). */
arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);
arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
/*
 * When probing via ACPI, we have no mechanism to override the sysreg
 * CNTFRQ value. This *must* be correct.
 */
arch_timer_rate = arch_timer_get_cntfrq();
if (!arch_timer_rate) {
pr_err(FW_BUG "frequency not available.\n");
return -EINVAL;
}
arch_timer_uses_ppi = arch_timer_select_ppi();
if (!arch_timer_ppi[arch_timer_uses_ppi]) {
pr_err("No interrupt available, giving up\n");
return -EINVAL;
}
/* Always-on capability */
arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);
/* Check for globally applicable workarounds */
arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
ret = arch_timer_register();
if (ret)
return ret;
/* MMIO timer failures are logged but do not fail the CP15 timer. */
if (platform_timer_count &&
arch_timer_mem_acpi_init(platform_timer_count))
pr_err("Failed to initialize memory-mapped timer.\n");
return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
  1358. #endif