arm_arch_timer.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056
  1. /*
  2. * linux/drivers/clocksource/arm_arch_timer.c
  3. *
  4. * Copyright (C) 2011 ARM Ltd.
  5. * All Rights Reserved
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #define pr_fmt(fmt) "arm_arch_timer: " fmt
  12. #include <linux/init.h>
  13. #include <linux/kernel.h>
  14. #include <linux/device.h>
  15. #include <linux/smp.h>
  16. #include <linux/cpu.h>
  17. #include <linux/cpu_pm.h>
  18. #include <linux/clockchips.h>
  19. #include <linux/clocksource.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_address.h>
  23. #include <linux/io.h>
  24. #include <linux/slab.h>
  25. #include <linux/sched_clock.h>
  26. #include <linux/acpi.h>
  27. #include <asm/arch_timer.h>
  28. #include <asm/virt.h>
  29. #include <clocksource/arm_arch_timer.h>
/* CNTCTLBase registers (counter control frame) */
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))	/* frame n has virtual capability */

#define CNTACR(n)	(0x40 + ((n) * 4))	/* per-frame access control */
#define CNTACR_RPCT	BIT(0)	/* read physical counter */
#define CNTACR_RVCT	BIT(1)	/* read virtual counter */
#define CNTACR_RFRQ	BIT(2)	/* read frequency */
#define CNTACR_RVOFF	BIT(3)	/* read virtual offset */
#define CNTACR_RWVT	BIT(4)	/* read/write virtual timer */
#define CNTACR_RWPT	BIT(5)	/* read/write physical timer */

/* CNTBase (per-frame) registers */
#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

/* Which timer flavours have been probed so far */
#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

/* CNTBase of the frame backing arch_counter_get_cntvct_mem() */
static void __iomem *arch_counter_base;

/* One per MMIO timer frame: register mapping plus its clockevent */
struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;	/* counter frequency in Hz, 0 = not yet known */

/* Indices into arch_timer_ppi[] for each per-cpu interrupt firmware provides */
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

/* Which PPI (and hence which CP15 timer) the clockevent uses */
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
/*
 * Early param "clocksource.arm_arch_timer.evtstrm=<bool>" overrides the
 * Kconfig default for enabling the event stream.
 */
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
  74. /*
  75. * Architected system timer support.
  76. */
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * Freescale erratum A-008585 support. The static key gates the
 * workaround so unaffected systems pay no cost; the out-of-line
 * accessors below wrap __fsl_a008585_read_reg() (asm/arch_timer.h),
 * which implements the actual erratum-safe read sequence.
 */
DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

/* -1 = decide from DT, 0/1 = forced off/on via the command line */
static int fsl_a008585_enable = -1;

static int __init early_fsl_a008585_cfg(char *buf)
{
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret)
		return ret;

	fsl_a008585_enable = val;
	return 0;
}
early_param("clocksource.arm_arch_timer.fsl-a008585", early_fsl_a008585_cfg);

/* Out-of-line erratum-safe timer/counter readers */
u32 __fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

u32 __fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

u64 __fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
EXPORT_SYMBOL(__fsl_a008585_read_cntvct_el0);
#endif /* CONFIG_FSL_ERRATUM_A008585 */
/*
 * Write a timer register, dispatching on access type: the two MMIO
 * access types use the CNTP/CNTV offsets within the frame mapping;
 * anything else goes through the CP15/sysreg accessors.
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}
/*
 * Read a timer register; mirror of arch_timer_reg_write() above.
 * Every arch_timer_reg enumerator is handled, so val is always set
 * before being returned.
 */
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
/*
 * Common interrupt handler: if this timer raised the interrupt
 * (IT_STAT set), mask it and invoke the clockevent handler; otherwise
 * report the interrupt as spurious.
 */
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		/* Mask here; set_next_event() unmasks for the next expiry */
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/* Per-access-type irq thunks; dev_id is the clock_event_device */
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
/* Disable the timer by clearing the ENABLE bit in its control register */
static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

/* clockevents set_state_shutdown callbacks, one per access type */
static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
/*
 * Program a new relative expiry: write the TVAL countdown first, then
 * enable and unmask via CTRL, so the timer cannot fire with a stale
 * compare value.
 */
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * Erratum A-008585 variant of set_next_event(): program the absolute
 * compare value (CVAL = now + delta) directly instead of the TVAL
 * countdown register, avoiding the register affected by the erratum.
 */
static __always_inline void fsl_a008585_set_next_event(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int fsl_a008585_set_next_event_virt(unsigned long evt,
					   struct clock_event_device *clk)
{
	fsl_a008585_set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int fsl_a008585_set_next_event_phys(unsigned long evt,
					   struct clock_event_device *clk)
{
	fsl_a008585_set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
#endif /* CONFIG_FSL_ERRATUM_A008585 */
/* clockevents set_next_event callbacks, one per access type */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
/*
 * Swap in the erratum-safe set_next_event callback when the A-008585
 * workaround is active. Compiles to an empty function when the erratum
 * support is configured out; returns early when the static key is off.
 */
static void fsl_a008585_set_sne(struct clock_event_device *clk)
{
#ifdef CONFIG_FSL_ERRATUM_A008585
	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	if (arch_timer_uses_ppi == VIRT_PPI)
		clk->set_next_event = fsl_a008585_set_next_event_virt;
	else
		clk->set_next_event = fsl_a008585_set_next_event_phys;
#endif
}
/*
 * Populate and register a clockevent device for either the per-cpu CP15
 * timer (type == ARCH_CP15_TIMER) or an MMIO timer frame. Callbacks are
 * chosen to match the PPI flavour / frame type in use.
 */
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		/* Timer may stop in deep idle unless marked always-on */
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		/* Override set_next_event if the FSL erratum applies */
		fsl_a008585_set_sne(clk);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	/* Start from a known (disabled) state before registering */
	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
/*
 * Enable the virtual counter event stream with the given divider and
 * advertise the capability through the ELF hwcaps.
 */
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

/*
 * Pick the event-stream divider (counter bit position) that brings the
 * stream rate closest to ARCH_TIMER_EVT_STREAM_FREQ, then enable it.
 */
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	/* Round down when the next-lower power of two is closer */
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}
/*
 * Restrict EL0/userspace access: only the virtual counter may be read
 * directly from userspace; the timers, the physical counter and the
 * event stream stay kernel-only.
 */
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
	arch_timer_set_cntkctl(cntkctl);
}
  380. static bool arch_timer_has_nonsecure_ppi(void)
  381. {
  382. return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
  383. arch_timer_ppi[PHYS_NONSECURE_PPI]);
  384. }
  385. static u32 check_ppi_trigger(int irq)
  386. {
  387. u32 flags = irq_get_trigger_type(irq);
  388. if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
  389. pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
  390. pr_warn("WARNING: Please fix your firmware\n");
  391. flags = IRQF_TRIGGER_LOW;
  392. }
  393. return flags;
  394. }
/*
 * CPU-hotplug "starting" callback: set up this cpu's clockevent, enable
 * its PPI(s), lock down userspace access and optionally enable the
 * event stream. Runs on the cpu being brought up.
 */
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	/* Secure systems may also hand us a non-secure physical PPI */
	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
/*
 * Determine arch_timer_rate once. Under DT boot the "clock-frequency"
 * property wins; otherwise (or under ACPI) read CNTFRQ from the MMIO
 * frame when one is given, else from CP15.
 */
static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}
/*
 * Print a one-line summary of the probed timers: which flavours are
 * present, the rate in MHz, and whether each uses the virtual or
 * physical timer.
 */
static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
		(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

/* Counter frequency in Hz (0 if not yet detected) */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
/*
 * Read the 64-bit virtual counter from the MMIO frame. The hi/lo/hi
 * read sequence detects a carry between the two 32-bit halves: retry
 * until the high word is stable so the combined value is consistent.
 */
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

/* clocksource read callback */
static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter read callback (feeds the KVM timecounter below) */
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name = "arch_sys_counter",
	.rating = 400,
	.read = arch_counter_read,
	.mask = CLOCKSOURCE_MASK(56),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};

static struct cyclecounter cyclecounter = {
	.read = arch_counter_read_cc,
	.mask = CLOCKSOURCE_MASK(56),
};

/* Timer info (timecounter + virtual timer irq) exported to KVM */
static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
/*
 * Register the counter as clocksource, sched_clock source and KVM
 * timecounter. A CP15 counter is preferred over an MMIO one; the
 * virtual counter is used except on 32-bit when a physical PPI was
 * chosen.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = true;

#ifdef CONFIG_FSL_ERRATUM_A008585
		/*
		 * Don't use the vdso fastpath if errata require using
		 * the out-of-line counter accessor.
		 */
		if (static_branch_unlikely(&arch_timer_read_ool_enabled))
			clocksource_counter.archdata.vdso_direct = false;
#endif
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
/* Disable this cpu's timer PPI(s) and shut its clockevent down */
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

/* CPU-hotplug "dying" callback: mirror of arch_timer_starting_cpu() */
static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}
#ifdef CONFIG_CPU_PM
/*
 * CNTKCTL is lost across a core power-down: save it on CPU_PM_ENTER and
 * restore it on exit (or failed entry).
 * NOTE(review): saved_cntkctl is a single global, which assumes every
 * cpu programs CNTKCTL identically (they do - see
 * arch_counter_set_user_access() and arch_timer_configure_evtstream()).
 */
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}
#else
/* No-op stubs when CONFIG_CPU_PM is disabled */
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
/*
 * Allocate the per-cpu clockevent devices, request the PPI(s) matching
 * the chosen timer, register the CPU PM notifier, and install the
 * hotplug callbacks (which immediately configure the boot cpu's timer).
 * Unwinds everything in reverse order on failure.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		/* A secure boot may also expose a non-secure physical PPI */
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"AP_ARM_ARCH_TIMER_STARTING",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
/*
 * Allocate and register the clockevent for an MMIO timer frame, then
 * hook up its (non-percpu) interrupt.
 * NOTE(review): on request_irq() failure the struct is freed even
 * though __arch_timer_setup() has already registered the embedded
 * clockevent with the core - confirm this unwind path is safe before
 * relying on it.
 */
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}
/* DT match tables: CP15 timer nodes and MMIO timer-frame nodes */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};
  662. static bool __init
  663. arch_timer_needs_probing(int type, const struct of_device_id *matches)
  664. {
  665. struct device_node *dn;
  666. bool needs_probing = false;
  667. dn = of_find_matching_node(NULL, matches);
  668. if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
  669. needs_probing = true;
  670. of_node_put(dn);
  671. return needs_probing;
  672. }
/*
 * Final registration step, shared by the DT and ACPI paths. If the DT
 * also describes the other timer flavour and it has not probed yet,
 * return early: the last probe to arrive performs the registration.
 */
static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}
/*
 * Choose which PPI (and hence which timer) the kernel will drive, then
 * register everything. Shared by the DT and ACPI entry points.
 */
static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	/* Tell KVM which irq the guest's virtual timer is wired to */
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}
/* DT probe for the per-cpu (CP15) timer node */
static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	/* The DT binding lists interrupts in ppi_nr order */
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

#ifdef CONFIG_FSL_ERRATUM_A008585
	/* The command line (fsl_a008585_enable >= 0) overrides the DT */
	if (fsl_a008585_enable < 0)
		fsl_a008585_enable = of_property_read_bool(np, "fsl,erratum-a008585");
	if (fsl_a008585_enable) {
		static_branch_enable(&arch_timer_read_ool_enabled);
		pr_info("Enabling workaround for FSL erratum A-008585\n");
	}
#endif

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
  758. static int __init arch_timer_mem_init(struct device_node *np)
  759. {
  760. struct device_node *frame, *best_frame = NULL;
  761. void __iomem *cntctlbase, *base;
  762. unsigned int irq, ret = -EINVAL;
  763. u32 cnttidr;
  764. arch_timers_present |= ARCH_MEM_TIMER;
  765. cntctlbase = of_iomap(np, 0);
  766. if (!cntctlbase) {
  767. pr_err("arch_timer: Can't find CNTCTLBase\n");
  768. return -ENXIO;
  769. }
  770. cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
  771. /*
  772. * Try to find a virtual capable frame. Otherwise fall back to a
  773. * physical capable frame.
  774. */
  775. for_each_available_child_of_node(np, frame) {
  776. int n;
  777. u32 cntacr;
  778. if (of_property_read_u32(frame, "frame-number", &n)) {
  779. pr_err("arch_timer: Missing frame-number\n");
  780. of_node_put(frame);
  781. goto out;
  782. }
  783. /* Try enabling everything, and see what sticks */
  784. cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
  785. CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
  786. writel_relaxed(cntacr, cntctlbase + CNTACR(n));
  787. cntacr = readl_relaxed(cntctlbase + CNTACR(n));
  788. if ((cnttidr & CNTTIDR_VIRT(n)) &&
  789. !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
  790. of_node_put(best_frame);
  791. best_frame = frame;
  792. arch_timer_mem_use_virtual = true;
  793. break;
  794. }
  795. if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
  796. continue;
  797. of_node_put(best_frame);
  798. best_frame = of_node_get(frame);
  799. }
  800. ret= -ENXIO;
  801. base = arch_counter_base = of_iomap(best_frame, 0);
  802. if (!base) {
  803. pr_err("arch_timer: Can't map frame's registers\n");
  804. goto out;
  805. }
  806. if (arch_timer_mem_use_virtual)
  807. irq = irq_of_parse_and_map(best_frame, 1);
  808. else
  809. irq = irq_of_parse_and_map(best_frame, 0);
  810. ret = -EINVAL;
  811. if (!irq) {
  812. pr_err("arch_timer: Frame missing %s irq",
  813. arch_timer_mem_use_virtual ? "virt" : "phys");
  814. goto out;
  815. }
  816. arch_timer_detect_rate(base, np);
  817. ret = arch_timer_mem_register(base, irq);
  818. if (ret)
  819. goto out;
  820. return arch_timer_common_init();
  821. out:
  822. iounmap(cntctlbase);
  823. of_node_put(best_frame);
  824. return ret;
  825. }
  826. CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
  827. arch_timer_mem_init);
  828. #ifdef CONFIG_ACPI
/*
 * Map a GTDT-described interrupt (GSI + flags) to a Linux irq number,
 * registering its trigger mode and polarity. Returns 0 when the table
 * did not provide an interrupt.
 */
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}
  840. /* Initialize per-processor generic timer */
  841. static int __init arch_timer_acpi_init(struct acpi_table_header *table)
  842. {
  843. struct acpi_table_gtdt *gtdt;
  844. if (arch_timers_present & ARCH_CP15_TIMER) {
  845. pr_warn("arch_timer: already initialized, skipping\n");
  846. return -EINVAL;
  847. }
  848. gtdt = container_of(table, struct acpi_table_gtdt, header);
  849. arch_timers_present |= ARCH_CP15_TIMER;
  850. arch_timer_ppi[PHYS_SECURE_PPI] =
  851. map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
  852. gtdt->secure_el1_flags);
  853. arch_timer_ppi[PHYS_NONSECURE_PPI] =
  854. map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
  855. gtdt->non_secure_el1_flags);
  856. arch_timer_ppi[VIRT_PPI] =
  857. map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
  858. gtdt->virtual_timer_flags);
  859. arch_timer_ppi[HYP_PPI] =
  860. map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
  861. gtdt->non_secure_el2_flags);
  862. /* Get the frequency from CNTFRQ */
  863. arch_timer_detect_rate(NULL, NULL);
  864. /* Always-on capability */
  865. arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
  866. arch_timer_init();
  867. return 0;
  868. }
  869. CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
  870. #endif