hpet.c

#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i8253.h>
#include <linux/slab.h>
#include <linux/hpet.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/hpet.h>
#include <asm/time.h>

#define HPET_MASK CLOCKSOURCE_MASK(32)

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC 1000000L

#define HPET_DEV_USED_BIT 2
#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID 0x8
#define HPET_DEV_FSB_CAP 0x1000
#define HPET_DEV_PERI_CAP 0x2000

#define HPET_MIN_CYCLES 128
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
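
/*
 * HPET_MIN_PROG_DELTA works out to 1.5 * 128 = 192 HPET cycles; see the
 * comment in hpet_next_event() below for the reasoning behind this margin.
 */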

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
u8 hpet_msi_disable;

#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
#endif
static void __iomem *hpet_virt_address;

struct hpet_dev {
	struct clock_event_device evt;
	unsigned int num;
	int cpu;
	unsigned int irq;
	unsigned int flags;
	char name[10];
};

inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
{
	return container_of(evtdev, struct hpet_dev, evt);
}

inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}

#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
#endif

static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 */
int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;

static int __init hpet_setup(char *str)
{
	while (str) {
		char *next = strchr(str, ',');

		if (next)
			*next++ = 0;
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = 1;
		str = next;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
__setup("nohpet", disable_hpet);

static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/*
 * HPET timer interrupt enable / disable
 */
static int hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

static void _hpet_print_config(const char *function, int line)
{
	u32 i, timers, l, h;

	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
	l = hpet_readl(HPET_ID);
	h = hpet_readl(HPET_PERIOD);
	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
	l = hpet_readl(HPET_CFG);
	h = hpet_readl(HPET_STATUS);
	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	for (i = 0; i < timers; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
		       i, l, h);
	}
}

#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__FUNCTION__, __LINE__);	\
} while (0)

/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address = hpet_address;
	hd.hd_address = hpet;
	hd.hd_nirqs = nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ. Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);
}
#else
static void hpet_reserve_platform_timers(unsigned int id) { }
#endif

/*
 * Common hpet info
 */
static unsigned long hpet_freq;

static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt);
static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt);

/*
 * The hpet clock event device
 */
static struct clock_event_device hpet_clockevent = {
	.name = "hpet",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode = hpet_legacy_set_mode,
	.set_next_event = hpet_legacy_next_event,
	.irq = 0,
	.rating = 50,
};

static void hpet_stop_counter(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);

	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

static void hpet_resume_device(void)
{
	force_hpet_resume();
}

static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}

static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = 1;
}

static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}

static int hpet_setup_msi_irq(unsigned int irq);

static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		hpet_stop_counter();
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned int) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		/*
		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
		 * bit is automatically cleared after the first write.
		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
		 * Publication # 24674)
		 */
		hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
		hpet_start_counter();
		hpet_print_config();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

			hpet_setup_msi_irq(hdev->irq);
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
			enable_irq(hdev->irq);
		}
		hpet_print_config();
		break;
	}
}
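
/*
 * For the periodic case above, delta = (NSEC_PER_SEC/HZ) * mult >> shift
 * converts one tick period into HPET cycles. As a rough example, assuming
 * the common 14.318 MHz HPET clock and HZ=250, that is about 57273 cycles
 * per 4 ms tick (the exact value depends on the hardware period and HZ).
 */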

static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on an equal comparison and neither provides a less
	 * than or equal functionality (which would require taking
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but an NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
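
/*
 * Example of the -ETIME rule above: if delta is the minimum 192 cycles and
 * an SMI burns roughly 100 HPET cycles between the counter read and the
 * comparator write, the readback leaves res below HPET_MIN_CYCLES (128),
 * so we return -ETIME and let the clockevents core retry with a fresh
 * expiry instead of waiting for a full 32-bit counter wrap.
 */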

static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}

/*
 * HPET MSI Support
 */
#ifdef CONFIG_PCI_MSI

static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev *hpet_devs;

void hpet_msi_unmask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int cfg;

	/* unmask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_mask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int cfg;

	/* mask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{
	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	msg->address_hi = 0;
}
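
/*
 * Per the HPET specification, the Tn FSB interrupt route register holds the
 * MSI message: the low dword carries the data value, the high dword the
 * 32-bit message address, which is why address_hi is always written as 0.
 */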

static void hpet_msi_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	hpet_set_mode(mode, evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	return hpet_next_event(delta, evt, hdev->num);
}

static int hpet_setup_msi_irq(unsigned int irq)
{
	if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) {
		irq_free_hwirq(irq);
		return -EINVAL;
	}
	return 0;
}

static int hpet_assign_irq(struct hpet_dev *dev)
{
	unsigned int irq = irq_alloc_hwirq(-1);

	if (!irq)
		return -EINVAL;

	irq_set_handler_data(irq, dev);

	if (hpet_setup_msi_irq(irq))
		return -EINVAL;

	dev->irq = irq;
	return 0;
}

static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
	struct hpet_dev *dev = (struct hpet_dev *)data;
	struct clock_event_device *hevt = &dev->evt;

	if (!hevt->event_handler) {
		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
		       dev->num);
		return IRQ_HANDLED;
	}

	hevt->event_handler(hevt);
	return IRQ_HANDLED;
}

static int hpet_setup_irq(struct hpet_dev *dev)
{
	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
	       dev->name, dev->irq);

	return 0;
}

/* This should be called in specific @cpu */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	if (hpet_setup_msi_irq(hdev->irq))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->cpumask = cpumask_of(hdev->cpu);

	clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
					0x7FFFFFFF);
}

#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif

static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i;

	if (hpet_msi_disable)
		return;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return;

	id = hpet_readl(HPET_ID);
	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */
	hpet_print_config();

	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		hdev->num = i;

		sprintf(hdev->name, "hpet%d", i);
		if (hpet_assign_irq(hdev))
			continue;

		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
	       num_timers, num_timers_used);
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
#endif

static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
				     (unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}
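
/*
 * The test_and_set_bit() on HPET_DEV_USED_BIT above claims a channel
 * atomically, so two CPUs coming online concurrently cannot be handed
 * the same HPET comparator.
 */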

struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;
};

static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}

static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	switch (action & 0xf) {
	case CPU_ONLINE:
		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);
		destroy_delayed_work_on_stack(&work.work);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}

#else

static int hpet_setup_msi_irq(unsigned int irq)
{
	return 0;
}

static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	return;
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	return;
}
#endif

static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

#endif

/*
 * Clock source related code
 */
static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)hpet_readl(HPET_COUNTER);
}

static struct clocksource clocksource_hpet = {
	.name = "hpet",
	.rating = 250,
	.read = read_hpet,
	.mask = HPET_MASK,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.resume = hpet_resume_counter,
	.archdata = { .vclock_mode = VCLOCK_HPET },
};
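
/*
 * The VCLOCK_HPET archdata entry lets the x86 vDSO read the HPET counter
 * directly from user space when this clocksource is selected, avoiding a
 * syscall for clock_gettime()/gettimeofday().
 */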

static int hpet_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter */
	hpet_restart_counter();

	/* Verify whether hpet counter works */
	t1 = hpet_readl(HPET_COUNTER);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == hpet_readl(HPET_COUNTER)) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
	return 0;
}
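
/*
 * Sanity of the check above: even on a 4 GHz CPU the 200000 TSC cycle wait
 * is about 50us, which is several hundred ticks of a typical ~14.3 MHz
 * HPET, so a counter that has not moved at all is genuinely stuck.
 */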

static u32 *hpet_boot_cfg;

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	u32 hpet_period, cfg, id;
	u64 freq;
	unsigned int i, last;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use an
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * The period is a femtoseconds value. Convert it to a
	 * frequency.
	 */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;
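
	/*
	 * Worked example (typical hardware, not guaranteed): a period of
	 * 69841279 fs gives 10^15 / 69841279 ~= 14318180 Hz, i.e. the
	 * familiar 14.318 MHz HPET clock.
	 */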

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

	last = (id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!last)
		goto out_nohpet;
#endif

	cfg = hpet_readl(HPET_CFG);
	hpet_boot_cfg = kmalloc((last + 2) * sizeof(*hpet_boot_cfg),
				GFP_KERNEL);
	if (hpet_boot_cfg)
		*hpet_boot_cfg = cfg;
	else
		pr_warn("HPET initial state will not be saved\n");
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	if (cfg)
		pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
			cfg);
	for (i = 0; i <= last; ++i) {
		cfg = hpet_readl(HPET_Tn_CFG(i));
		if (hpet_boot_cfg)
			hpet_boot_cfg[i + 1] = cfg;
		cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
		hpet_writel(cfg, HPET_Tn_CFG(i));
		cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
			 | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
			 | HPET_TN_FSB | HPET_TN_FSB_CAP);
		if (cfg)
			pr_warn("HPET: Unrecognized bits %#x set in cfg#%u\n",
				cfg, i);
	}
	hpet_print_config();

	if (hpet_clocksource_register())
		goto out_nohpet;

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		return 1;
	}
	return 0;

out_nohpet:
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}

/*
 * Needs to be late, as the reserve_timer code calls kmalloc!
 *
 * Not a problem on i386 as hpet_enable is called from late_time_init,
 * but on x86_64 it is necessary!
 */
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	__hotcpu_notifier(hpet_cpuhp_notify, -20);
	cpu_notifier_register_done();

	return 0;
}
fs_initcall(hpet_late_init);

void hpet_disable(void)
{
	if (is_hpet_capable() && hpet_virt_address) {
		unsigned int cfg = hpet_readl(HPET_CFG), id, last;

		if (hpet_boot_cfg)
			cfg = *hpet_boot_cfg;
		else if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = 0;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);

		if (!hpet_boot_cfg)
			return;

		id = hpet_readl(HPET_ID);
		last = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);

		for (id = 0; id <= last; ++id)
			hpet_writel(hpet_boot_cfg[id + 1], HPET_Tn_CFG(id));

		if (*hpet_boot_cfg & HPET_CFG_ENABLE)
			hpet_writel(*hpet_boot_cfg, HPET_CFG);
	}
}

#ifdef CONFIG_HPET_EMULATE_RTC

/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64Hz or user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <asm/rtc.h>

#define DEFAULT_RTC_INT_FREQ 64
#define DEFAULT_RTC_SHIFT 6
#define RTC_NUM_INTS 1

static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
static u32 hpet_default_delta;
static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;

static rtc_irq_handler irq_handler;

/*
 * Check that the hpet counter c1 is ahead of c2
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	return (s32)(c2 - c1) < 0;
}
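
/*
 * The signed subtraction above is wrap-safe: e.g. with c1 = 0x00000010 and
 * c2 = 0xfffffff0, (s32)(c2 - c1) is negative, so c1 is correctly treated
 * as ahead of c2 across the 32-bit counter wrap.
 */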

/*
 * Registers an IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);

/*
 * Timer 1 for RTC emulation. We use one-shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
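
/*
 * hpet_default_delta computed above is the number of HPET cycles in 1/64 s
 * (DEFAULT_RTC_SHIFT = 6); on a typical 14.318 MHz HPET that is roughly
 * 223722 cycles.
 */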

static void hpet_disable_rtc_channel(void)
{
	unsigned long cfg;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_ENABLE;
	hpet_writel(cfg, HPET_T1_CFG);
}

/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags &= ~bit_mask;
	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);

int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
			unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_alarm_time.tm_hour = hrs;
	hpet_alarm_time.tm_min = min;
	hpet_alarm_time.tm_sec = sec;

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);

int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ)
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	else {
		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= hpet_clockevent.shift;
		hpet_pie_delta = clc;
		hpet_pie_limit = 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
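
/*
 * Example for hpet_set_periodic_freq(): a requested 1024 Hz periodic rate
 * exceeds DEFAULT_RTC_INT_FREQ, so hpet_pie_delta becomes the HPET cycles
 * per period (about 13982 on a 14.318 MHz HPET); a request of 32 Hz instead
 * sets hpet_pie_limit = 64 / 32 = 2, i.e. fire on every other 64 Hz poll.
 */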

int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);

static void hpet_rtc_timer_reinit(void)
{
	unsigned int delta;
	int lost_ints = -1;

	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
			       lost_ints);
	}
}
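
/*
 * lost_ints starts at -1 above so the mandatory first increment of the
 * comparator does not count as a lost interrupt; only additional passes
 * through the loop are reported.
 */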

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
		rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif