hpet.c
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i8253.h>
#include <linux/slab.h>
#include <linux/hpet.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>

#include <asm/irqdomain.h>
#include <asm/fixmap.h>
#include <asm/hpet.h>
#include <asm/time.h>

#define HPET_MASK CLOCKSOURCE_MASK(32)

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC 1000000L

#define HPET_DEV_USED_BIT 2
#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID 0x8
#define HPET_DEV_FSB_CAP 0x1000
#define HPET_DEV_PERI_CAP 0x2000
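/*
 * Minimum reprogramming delta, in HPET cycles. As explained in
 * hpet_next_event() below, the generic clockevents code is handed
 * 1.5 * HPET_MIN_CYCLES so that an NMI/SMI hitting between the counter
 * read and the comparator write cannot push the event into the past.
 */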
#define HPET_MIN_CYCLES 128
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
u8 hpet_msi_disable;

#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
#endif
static void __iomem *hpet_virt_address;

struct hpet_dev {
	struct clock_event_device evt;
	unsigned int num;
	int cpu;
	unsigned int irq;
	unsigned int flags;
	char name[10];
};

inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
{
	return container_of(evtdev, struct hpet_dev, evt);
}

inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}

#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
#endif

static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 */
int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;

static int __init hpet_setup(char *str)
{
	while (str) {
		char *next = strchr(str, ',');

		if (next)
			*next++ = 0;
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = 1;
		str = next;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
__setup("nohpet", disable_hpet);

static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/*
 * HPET timer interrupt enable / disable
 */
static int hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

static void _hpet_print_config(const char *function, int line)
{
	u32 i, timers, l, h;

	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
	l = hpet_readl(HPET_ID);
	h = hpet_readl(HPET_PERIOD);
	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
	l = hpet_readl(HPET_CFG);
	h = hpet_readl(HPET_STATUS);
	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	for (i = 0; i < timers; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
		       i, l, h);
	}
}

#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__func__, __LINE__);		\
} while (0)

/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address = hpet_address;
	hd.hd_address = hpet;
	hd.hd_nirqs = nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ. Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);
}
#else
static void hpet_reserve_platform_timers(unsigned int id) { }
#endif

/*
 * Common hpet info
 */
static unsigned long hpet_freq;

static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt);
static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt);

/*
 * The hpet clock event device
 */
static struct clock_event_device hpet_clockevent = {
	.name		= "hpet",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= hpet_legacy_set_mode,
	.set_next_event	= hpet_legacy_next_event,
	.irq		= 0,
	.rating		= 50,
};

static void hpet_stop_counter(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);

	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

static void hpet_resume_device(void)
{
	force_hpet_resume();
}

static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}

static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = 1;
}

static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}
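/*
 * Note on the periodic programming below: the clockevent mult/shift pair
 * converts nanoseconds to HPET cycles, so the tick period in cycles is
 * (NSEC_PER_SEC / HZ) * evt->mult >> evt->shift.
 */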
static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		hpet_stop_counter();
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned int) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		/*
		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
		 * bit is automatically cleared after the first write.
		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
		 * Publication # 24674)
		 */
		hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
		hpet_start_counter();
		hpet_print_config();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

			irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
			enable_irq(hdev->irq);
		}
		hpet_print_config();
		break;
	}
}

static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));
	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on an equal comparison and neither provides a less
	 * than or equal functionality (which would require taking
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but an NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
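	/*
	 * For scale: on a typical 14.318 MHz HPET, HPET_MIN_CYCLES (128)
	 * corresponds to roughly 9us and HPET_MIN_PROG_DELTA (192) to
	 * roughly 13us.
	 */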
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}

/*
 * HPET MSI Support
 */
#ifdef CONFIG_PCI_MSI

static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev *hpet_devs;
static struct irq_domain *hpet_domain;

void hpet_msi_unmask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int cfg;

	/* unmask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_mask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int cfg;

	/* mask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}
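/*
 * The Tn FSB interrupt route register carries the MSI message: the
 * message data lives in the low 32 bits and the message address in the
 * high 32 bits, hence the paired 32-bit accesses below.
 */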
void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{
	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	msg->address_hi = 0;
}

static void hpet_msi_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	hpet_set_mode(mode, evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	return hpet_next_event(delta, evt, hdev->num);
}

static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
	struct hpet_dev *dev = (struct hpet_dev *)data;
	struct clock_event_device *hevt = &dev->evt;

	if (!hevt->event_handler) {
		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
		       dev->num);
		return IRQ_HANDLED;
	}

	hevt->event_handler(hevt);
	return IRQ_HANDLED;
}

static int hpet_setup_irq(struct hpet_dev *dev)
{
	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
	       dev->name, dev->irq);

	return 0;
}
/* This should be called on the specified @cpu */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->cpumask = cpumask_of(hdev->cpu);

	clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
					0x7FFFFFFF);
}

#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif
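/*
 * Scan the comparators starting at @start_timer for FSB/MSI delivery
 * capability and hand one MSI-capable channel to each possible CPU,
 * leaving the last RESERVE_TIMERS channels alone for /dev/hpet.
 */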
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i, irq;

	if (hpet_msi_disable)
		return;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return;

	id = hpet_readl(HPET_ID);

	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */
	hpet_print_config();

	hpet_domain = hpet_create_irq_domain(hpet_blockid);
	if (!hpet_domain)
		return;

	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		sprintf(hdev->name, "hpet%d", i);
		hdev->num = i;

		irq = hpet_assign_irq(hpet_domain, hdev, hdev->num);
		if (irq <= 0)
			continue;

		hdev->irq = irq;
		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
	       num_timers, num_timers_used);
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
#endif

static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
				     (unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}

struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;
};

static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}

static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	switch (action & 0xf) {
	case CPU_ONLINE:
		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);
		destroy_delayed_work_on_stack(&work.work);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}
#else

static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	return;
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	return;
}
#endif

static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

#endif
/*
 * Clock source related code
 */
static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)hpet_readl(HPET_COUNTER);
}

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
	.archdata	= { .vclock_mode = VCLOCK_HPET },
};

static int hpet_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter */
	hpet_restart_counter();

	/* Verify whether hpet counter works */
	t1 = hpet_readl(HPET_COUNTER);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == hpet_readl(HPET_COUNTER)) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
	return 0;
}

static u32 *hpet_boot_cfg;

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	u32 hpet_period, cfg, id;
	u64 freq;
	unsigned int i, last;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * The period is a femto seconds value. Convert it to a
	 * frequency.
	 */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;
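	/*
	 * For example, a period of 69841279 fs (about 69.84 ns per tick,
	 * a common value) yields a frequency of roughly 14.318 MHz.
	 */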
	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

	last = (id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!last)
		goto out_nohpet;
#endif

	cfg = hpet_readl(HPET_CFG);
	hpet_boot_cfg = kmalloc((last + 2) * sizeof(*hpet_boot_cfg),
				GFP_KERNEL);
	if (hpet_boot_cfg)
		*hpet_boot_cfg = cfg;
	else
		pr_warn("HPET initial state will not be saved\n");
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	if (cfg)
		pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
			cfg);
	for (i = 0; i <= last; ++i) {
		cfg = hpet_readl(HPET_Tn_CFG(i));
		if (hpet_boot_cfg)
			hpet_boot_cfg[i + 1] = cfg;
		cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
		hpet_writel(cfg, HPET_Tn_CFG(i));
		cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
			 | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
			 | HPET_TN_FSB | HPET_TN_FSB_CAP);
		if (cfg)
			pr_warn("HPET: Unrecognized bits %#x set in cfg#%u\n",
				cfg, i);
	}
	hpet_print_config();

	if (hpet_clocksource_register())
		goto out_nohpet;

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		return 1;
	}
	return 0;

out_nohpet:
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}
/*
 * Needs to be late, as the reserve_timer code calls kmalloc!
 *
 * Not a problem on i386 as hpet_enable is called from late_time_init,
 * but on x86_64 it is necessary!
 */
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	__hotcpu_notifier(hpet_cpuhp_notify, -20);
	cpu_notifier_register_done();

	return 0;
}
fs_initcall(hpet_late_init);

void hpet_disable(void)
{
	if (is_hpet_capable() && hpet_virt_address) {
		unsigned int cfg = hpet_readl(HPET_CFG), id, last;

		if (hpet_boot_cfg)
			cfg = *hpet_boot_cfg;
		else if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = 0;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);

		if (!hpet_boot_cfg)
			return;

		id = hpet_readl(HPET_ID);
		last = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);

		for (id = 0; id <= last; ++id)
			hpet_writel(hpet_boot_cfg[id + 1], HPET_Tn_CFG(id));

		if (*hpet_boot_cfg & HPET_CFG_ENABLE)
			hpet_writel(*hpet_boot_cfg, HPET_CFG);
	}
}
#ifdef CONFIG_HPET_EMULATE_RTC

/*
 * HPET in LegacyReplacement mode eats up the RTC interrupt line. When
 * HPET is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generates an interrupt, every second, when the
 *    RTC clock is updated
 * 2) Alarm Interrupt - generates an interrupt at a specific time of day
 * 3) Periodic Interrupt - generates a periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root users) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <asm/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define DEFAULT_RTC_SHIFT	6
#define RTC_NUM_INTS		1

static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
static u32 hpet_default_delta;
static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;

static rtc_irq_handler irq_handler;
/*
 * Check that the hpet counter c1 is ahead of c2; the signed subtraction
 * keeps the comparison correct across 32-bit counter wraparound.
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	return (s32)(c2 - c1) < 0;
}

/*
 * Registers an IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;
	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
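	/*
	 * hpet_default_delta is the comparator increment for the default
	 * 64 Hz polling rate: mult/shift convert nanoseconds to HPET
	 * cycles, so (mult * NSEC_PER_SEC) >> shift is cycles per second,
	 * and the extra DEFAULT_RTC_SHIFT divides that by 64.
	 */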
	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

static void hpet_disable_rtc_channel(void)
{
	unsigned long cfg;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_ENABLE;
	hpet_writel(cfg, HPET_T1_CFG);
}

/*
 * The functions below are called from rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags &= ~bit_mask;
	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);

int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
			unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_alarm_time.tm_hour = hrs;
	hpet_alarm_time.tm_min = min;
	hpet_alarm_time.tm_sec = sec;

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);

int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ)
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	else {
		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= hpet_clockevent.shift;
		hpet_pie_delta = clc;
		hpet_pie_limit = 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);

int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);

static void hpet_rtc_timer_reinit(void)
{
	unsigned int delta;
	int lost_ints = -1;

	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
			       lost_ints);
	}
}

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
		rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif