/* tsc.c */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>

unsigned int __read_mostly cpu_khz;     /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/*
 * native_sched_clock() is called before tsc_init(), so we must start with
 * the TSC soft disabled to prevent erroneous rdtsc usage on !cpu_has_tsc
 * processors.
 */
static int __read_mostly tsc_disabled = -1;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;

/*
 * Use a ring-buffer like data structure, where a writer advances the head by
 * writing a new data entry and a reader advances the tail when it observes a
 * new entry.
 *
 * Writers are made to wait on readers until there's space to write a new
 * entry.
 *
 * This means that we can always use an {offset, mul} pair to compute a ns
 * value that is 'roughly' in the right direction, even if we're writing a new
 * {offset, mul} pair during the clock read.
 *
 * The down-side is that we can no longer guarantee strict monotonicity
 * (assuming the TSC was monotonic to begin with), because while we compute
 * the intersection point of the two clock slopes and make sure the time is
 * continuous at the point of switching, we can no longer guarantee a reader
 * is strictly before or after the switch point.
 *
 * It does mean a reader no longer needs to disable IRQs in order to avoid
 * CPU-Freq updates messing with its times, and similarly an NMI reader will
 * no longer run the risk of hitting half-written state.
 */

struct cyc2ns {
        struct cyc2ns_data data[2];     /*  0 + 2*24 = 48 */
        struct cyc2ns_data *head;       /* 48 + 8    = 56 */
        struct cyc2ns_data *tail;       /* 56 + 8    = 64 */
}; /* exactly fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

struct cyc2ns_data *cyc2ns_read_begin(void)
{
        struct cyc2ns_data *head;

        preempt_disable();

        head = this_cpu_read(cyc2ns.head);
        /*
         * Ensure we observe the entry when we observe the pointer to it.
         * Matches the wmb from cyc2ns_write_end().
         */
        smp_read_barrier_depends();
        head->__count++;
        barrier();

        return head;
}

void cyc2ns_read_end(struct cyc2ns_data *head)
{
        barrier();
        /*
         * If we're the outermost nested read, update the tail pointer
         * when we're done. This notifies possible pending writers
         * that we've observed the head pointer and that the other
         * entry is now free.
         */
        if (!--head->__count) {
                /*
                 * x86-TSO does not reorder writes with older reads;
                 * therefore once this write becomes visible to another
                 * cpu, we must be finished reading the cyc2ns_data.
                 *
                 * Matches with cyc2ns_write_begin().
                 */
                this_cpu_write(cyc2ns.tail, head);
        }
        preempt_enable();
}
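
/*
 * Illustrative use of the read side (a sketch, not a caller in this file);
 * the open-coded fast path in cycles_2_ns() below does the same thing:
 *
 *      struct cyc2ns_data *data = cyc2ns_read_begin();
 *      u64 ns = data->cyc2ns_offset +
 *               mul_u64_u32_shr(rdtsc(), data->cyc2ns_mul, data->cyc2ns_shift);
 *      cyc2ns_read_end(data);
 */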

/*
 * Begin writing a new @data entry for @cpu.
 *
 * Assumes some sort of write side lock; currently 'provided' by the assumption
 * that cpufreq will call its notifiers sequentially.
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
        struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
        struct cyc2ns_data *data = c2n->data;

        if (data == c2n->head)
                data++;

        /* XXX send an IPI to @cpu in order to guarantee a read? */

        /*
         * When we observe the tail write from cyc2ns_read_end(),
         * the cpu must be done with that entry and it's safe
         * to start writing to it.
         */
        while (c2n->tail == data)
                cpu_relax();

        return data;
}

static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
        struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

        /*
         * Ensure the @data writes are visible before we publish the
         * entry. Matches the data-dependency in cyc2ns_read_begin().
         */
        smp_wmb();

        ACCESS_ONCE(c2n->head) = data;
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift. The larger SC is, the more accurate the conversion, but
 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 * (64-bit result) can be used.
 *
 * We can use khz divisor instead of mhz to keep a better precision.
 * (mathieu.desnoyers@polymtl.ca)
 *
 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
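
/*
 * Worked example (illustrative numbers only): for a 2 GHz CPU,
 * cpu_khz = 2,000,000, so ns = cycles * 10^6 / (2 * 10^6) = cycles / 2.
 * clocks_calc_mult_shift() then picks {mul, shift} with
 * mul / 2^shift ~= 0.5, e.g. mul = 2^30 and shift = 31, and
 * cycles_2_ns() computes offset + ((cycles * mul) >> shift).
 */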

static void cyc2ns_data_init(struct cyc2ns_data *data)
{
        data->cyc2ns_mul = 0;
        data->cyc2ns_shift = 0;
        data->cyc2ns_offset = 0;
        data->__count = 0;
}

static void cyc2ns_init(int cpu)
{
        struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

        cyc2ns_data_init(&c2n->data[0]);
        cyc2ns_data_init(&c2n->data[1]);

        c2n->head = c2n->data;
        c2n->tail = c2n->data;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        struct cyc2ns_data *data, *tail;
        unsigned long long ns;

        /*
         * See cyc2ns_read_*() for details; replicated in order to avoid
         * an extra few instructions that came with the abstraction.
         * Notably, it allows us to only do the __count and tail update
         * dance when it's actually needed.
         */
        preempt_disable_notrace();
        data = this_cpu_read(cyc2ns.head);
        tail = this_cpu_read(cyc2ns.tail);

        if (likely(data == tail)) {
                ns = data->cyc2ns_offset;
                ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
        } else {
                data->__count++;

                barrier();

                ns = data->cyc2ns_offset;
                ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

                barrier();

                if (!--data->__count)
                        this_cpu_write(cyc2ns.tail, data);
        }
        preempt_enable_notrace();

        return ns;
}

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
        unsigned long long tsc_now, ns_now;
        struct cyc2ns_data *data;
        unsigned long flags;

        local_irq_save(flags);
        sched_clock_idle_sleep_event();

        if (!cpu_khz)
                goto done;

        data = cyc2ns_write_begin(cpu);

        tsc_now = rdtsc();
        ns_now = cycles_2_ns(tsc_now);

        /*
         * Compute a new multiplier as per the above comment and ensure our
         * time function is continuous; see the comment near struct
         * cyc2ns_data.
         */
        clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
                               NSEC_PER_MSEC, 0);

        /*
         * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
         * not expected to be greater than 31 due to the original published
         * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
         * value) - refer perf_event_mmap_page documentation in perf_event.h.
         */
        if (data->cyc2ns_shift == 32) {
                data->cyc2ns_shift = 31;
                data->cyc2ns_mul >>= 1;
        }

        data->cyc2ns_offset = ns_now -
                mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);

        cyc2ns_write_end(cpu, data);

done:
        sched_clock_idle_wakeup_event(0);
        local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
        if (static_branch_likely(&__use_tsc)) {
                u64 tsc_now = rdtsc();

                /* return the value in ns */
                return cycles_2_ns(tsc_now);
        }

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */

        /* No locking but a rare wrong value is not a big deal: */
        return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
        return cycles_2_ns(tsc);
}

/*
 * We need to define a real function for sched_clock, to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

int check_tsc_disabled(void)
{
        return tsc_disabled;
}
EXPORT_SYMBOL_GPL(check_tsc_disabled);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
        pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
        tsc_disabled = 1;
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
        setup_clear_cpu_cap(X86_FEATURE_TSC);
        return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
        if (!strcmp(str, "reliable"))
                tsc_clocksource_reliable = 1;
        if (!strncmp(str, "noirqtime", 9))
                no_sched_irq_time = 1;
        return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES     5
#define SMI_TRESHOLD    50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
        u64 t1, t2;
        int i;

        for (i = 0; i < MAX_RETRIES; i++) {
                t1 = get_cycles();
                if (hpet)
                        *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
                else
                        *p = acpi_pm_read_early();
                t2 = get_cycles();
                if ((t2 - t1) < SMI_TRESHOLD)
                        return t2;
        }
        return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
        u64 tmp;

        if (hpet2 < hpet1)
                hpet2 += 0x100000000ULL;
        hpet2 -= hpet1;
        tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
        do_div(tmp, 1000000);
        do_div(deltatsc, tmp);

        return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
        u64 tmp;

        if (!pm1 && !pm2)
                return ULONG_MAX;

        if (pm2 < pm1)
                pm2 += (u64)ACPI_PM_OVRRUN;
        pm2 -= pm1;
        tmp = pm2 * 1000000000LL;
        do_div(tmp, PMTMR_TICKS_PER_SEC);
        do_div(deltatsc, tmp);

        return (unsigned long) deltatsc;
}
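
/*
 * Note on units (added for clarity): the callers pre-multiply the TSC delta
 * by 1e6, and both helpers above divide by the reference interval converted
 * to nanoseconds, so the returned value is cycles per millisecond, i.e. kHz.
 */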

#define CAL_MS          10
#define CAL_LATCH       (PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS   1000

#define CAL2_MS         50
#define CAL2_LATCH      (PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS  5000

/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
        u64 tsc, t1, t2, delta;
        unsigned long tscmin, tscmax;
        int pitcnt;

        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Setup CTC channel 2 for mode 0 (interrupt on terminal
         * count mode), binary count. Set the latch register
         * (LSB then MSB) to begin countdown.
         */
        outb(0xb0, 0x43);
        outb(latch & 0xff, 0x42);
        outb(latch >> 8, 0x42);

        tsc = t1 = t2 = get_cycles();

        pitcnt = 0;
        tscmax = 0;
        tscmin = ULONG_MAX;
        while ((inb(0x61) & 0x20) == 0) {
                t2 = get_cycles();
                delta = t2 - tsc;
                tsc = t2;
                if ((unsigned long) delta < tscmin)
                        tscmin = (unsigned int) delta;
                if ((unsigned long) delta > tscmax)
                        tscmax = (unsigned int) delta;
                pitcnt++;
        }

        /*
         * Sanity checks:
         *
         * If we were not able to read the PIT more than loopmin
         * times, then we have been hit by a massive SMI
         *
         * If the maximum is 10 times larger than the minimum,
         * then we got hit by an SMI as well.
         */
        if (pitcnt < loopmin || tscmax > 10 * tscmin)
                return ULONG_MAX;

        /* Calculate the PIT value */
        delta = t2 - t1;
        do_div(delta, ms);
        return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when it doesn't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
        /* Ignore LSB */
        inb(0x42);
        return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
        int count;
        u64 tsc = 0, prev_tsc = 0;

        for (count = 0; count < 50000; count++) {
                if (!pit_verify_msb(val))
                        break;
                prev_tsc = tsc;
                tsc = get_cycles();
        }
        *deltap = get_cycles() - prev_tsc;
        *tscp = tsc;

        /*
         * We require _some_ success, but the quality control
         * will be based on the error terms on the TSC values.
         */
        return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
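
/*
 * For reference (simple arithmetic, not from the original comments): with
 * PIT_TICK_RATE = 1193182 Hz this works out to 50 * 1193182 / 1000 / 256,
 * i.e. roughly 233 MSB transitions at most.
 */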

static unsigned long quick_pit_calibrate(void)
{
        int i;
        u64 tsc, delta;
        unsigned long d1, d2;

        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Counter 2, mode 0 (one-shot), binary count
         *
         * NOTE! Mode 2 decrements by two (and then the
         * output is flipped each time, giving the same
         * final output frequency as a decrement-by-one),
         * so mode 0 is much better when looking at the
         * individual counts.
         */
        outb(0xb0, 0x43);

        /* Start at 0xffff */
        outb(0xff, 0x42);
        outb(0xff, 0x42);

        /*
         * The PIT starts counting at the next edge, so we
         * need to delay for a microsecond. The easiest way
         * to do that is to just read back the 16-bit counter
         * once from the PIT.
         */
        pit_verify_msb(0);

        if (pit_expect_msb(0xff, &tsc, &d1)) {
                for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
                        if (!pit_expect_msb(0xff-i, &delta, &d2))
                                break;

                        delta -= tsc;

                        /*
                         * Extrapolate the error and fail fast if the error will
                         * never be below 500 ppm.
                         */
                        if (i == 1 &&
                            d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
                                return 0;

                        /*
                         * Iterate until the error is less than 500 ppm
                         */
                        if (d1+d2 >= delta >> 11)
                                continue;

                        /*
                         * Check the PIT one more time to verify that
                         * all TSC reads were stable wrt the PIT.
                         *
                         * This also guarantees serialization of the
                         * last cycle read ('d2') in pit_expect_msb.
                         */
                        if (!pit_verify_msb(0xfe - i))
                                break;
                        goto success;
                }
        }
        pr_info("Fast TSC calibration failed\n");
        return 0;

success:
        /*
         * Ok, if we get here, then we've seen the
         * MSB of the PIT decrement 'i' times, and the
         * error has shrunk to less than 500 ppm.
         *
         * As a result, we can depend on there not being
         * any odd delays anywhere, and the TSC reads are
         * reliable (within the error).
         *
         * kHz = ticks / time-in-seconds / 1000;
         * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
         * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
         */
        delta *= PIT_TICK_RATE;
        do_div(delta, i*256*1000);
        pr_info("Fast TSC calibration using PIT\n");
        return delta;
}
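
/*
 * Worked example of the formula above (illustrative numbers only): if the MSB
 * decremented i = 20 times, the PIT counted 20 * 256 ticks, i.e. about 4.3 ms
 * at 1193182 Hz. A TSC delta of ~12,870,000 cycles over that window gives
 * 12870000 * 1193182 / (20 * 256 * 1000) ~= 3,000,000 kHz, i.e. a 3 GHz TSC.
 */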

/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
        u64 tsc1, tsc2, delta, ref1, ref2;
        unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
        unsigned long flags, latch, ms, fast_calibrate;
        int hpet = is_hpet_enabled(), i, loopmin;

        /* Calibrate TSC using MSR for Intel Atom SoCs */
        local_irq_save(flags);
        fast_calibrate = try_msr_calibrate_tsc();
        local_irq_restore(flags);
        if (fast_calibrate)
                return fast_calibrate;

        local_irq_save(flags);
        fast_calibrate = quick_pit_calibrate();
        local_irq_restore(flags);
        if (fast_calibrate)
                return fast_calibrate;

        /*
         * Run 5 calibration loops to get the lowest frequency value
         * (the best estimate). We use two different calibration modes
         * here:
         *
         * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
         * load a timeout of 50ms. We read the time right after we
         * started the timer and wait until the PIT count down reaches
         * zero. In each wait loop iteration we read the TSC and check
         * the delta to the previous read. We keep track of the min
         * and max values of that delta. The delta is mostly defined
         * by the IO time of the PIT access, so we can detect when an
         * SMI/SMM disturbance happened between the two reads. If the
         * maximum time is significantly larger than the minimum time,
         * then we discard the result and have another try.
         *
         * 2) Reference counter. If available we use the HPET or the
         * PMTIMER as a reference to check the sanity of that value.
         * We use separate TSC readouts and check inside of the
         * reference read for an SMI/SMM disturbance. We discard
         * disturbed values here as well. We do that around the PIT
         * calibration delay loop as we have to wait for a certain
         * amount of time anyway.
         */

        /* Preset PIT loop values */
        latch = CAL_LATCH;
        ms = CAL_MS;
        loopmin = CAL_PIT_LOOPS;

        for (i = 0; i < 3; i++) {
                unsigned long tsc_pit_khz;

                /*
                 * Read the start value and the reference count of
                 * hpet/pmtimer when available. Then do the PIT
                 * calibration, which will take at least 50ms, and
                 * read the end value.
                 */
                local_irq_save(flags);
                tsc1 = tsc_read_refs(&ref1, hpet);
                tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
                tsc2 = tsc_read_refs(&ref2, hpet);
                local_irq_restore(flags);

                /* Pick the lowest PIT TSC calibration so far */
                tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

                /* hpet or pmtimer available ? */
                if (ref1 == ref2)
                        continue;

                /* Check whether the sampling was disturbed by an SMI */
                if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
                        continue;

                tsc2 = (tsc2 - tsc1) * 1000000LL;
                if (hpet)
                        tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
                else
                        tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

                tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

                /* Check the reference deviation */
                delta = ((u64) tsc_pit_min) * 100;
                do_div(delta, tsc_ref_min);

                /*
                 * If both calibration results are inside a 10% window
                 * then we can be sure that the calibration
                 * succeeded. We break out of the loop right away. We
                 * use the reference value, as it is more precise.
                 */
                if (delta >= 90 && delta <= 110) {
                        pr_info("PIT calibration matches %s. %d loops\n",
                                hpet ? "HPET" : "PMTIMER", i + 1);
                        return tsc_ref_min;
                }

                /*
                 * Check whether PIT failed more than once. This
                 * happens in virtualized environments. We need to
                 * give the virtual PC a slightly longer timeframe for
                 * the HPET/PMTIMER to make the result precise.
                 */
                if (i == 1 && tsc_pit_min == ULONG_MAX) {
                        latch = CAL2_LATCH;
                        ms = CAL2_MS;
                        loopmin = CAL2_PIT_LOOPS;
                }
        }

        /*
         * Now check the results.
         */
        if (tsc_pit_min == ULONG_MAX) {
                /* PIT gave no useful value */
                pr_warn("Unable to calibrate against PIT\n");

                /* We don't have an alternative source, disable TSC */
                if (!hpet && !ref1 && !ref2) {
                        pr_notice("No reference (HPET/PMTIMER) available\n");
                        return 0;
                }

                /* The alternative source failed as well, disable TSC */
                if (tsc_ref_min == ULONG_MAX) {
                        pr_warn("HPET/PMTIMER calibration failed\n");
                        return 0;
                }

                /* Use the alternative source */
                pr_info("using %s reference calibration\n",
                        hpet ? "HPET" : "PMTIMER");

                return tsc_ref_min;
        }

        /* We don't have an alternative source, use the PIT calibration value */
        if (!hpet && !ref1 && !ref2) {
                pr_info("Using PIT calibration value\n");
                return tsc_pit_min;
        }

        /* The alternative source failed, use the PIT calibration value */
        if (tsc_ref_min == ULONG_MAX) {
                pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
                return tsc_pit_min;
        }

        /*
         * The calibration values differ too much. When in doubt, we use
         * the PIT value as we know that there are PMTIMERs around
         * running at double speed. At least we let the user know:
         */
        pr_warn("PIT calibration deviates from %s: %lu %lu\n",
                hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
        pr_info("Using PIT calibration value\n");
        return tsc_pit_min;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                tsc_khz = x86_platform.calibrate_tsc();
                cpu_khz = tsc_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                        cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
        if (!sched_clock_stable())
                return;

        cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit
 * TSC to an arbitrary value (still sync'd across cpus) during resume from
 * such sleep states. To cope with this, recompute the cyc2ns_offset for each
 * cpu so that sched_clock() continues from the point where it was left off
 * during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
        unsigned long long offset;
        unsigned long flags;
        int cpu;

        if (!sched_clock_stable())
                return;

        local_irq_save(flags);

        /*
         * We're coming out of suspend, there's no concurrency yet; don't
         * bother being nice about the RCU stuff, just write to both
         * data fields.
         */

        this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
        this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

        offset = cyc2ns_suspend - sched_clock();

        for_each_possible_cpu(cpu) {
                per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
                per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
        }

        local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ
/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * frequency scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                void *data)
{
        struct cpufreq_freqs *freq = data;
        unsigned long *lpj;

        if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
                return 0;

        lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

        if (!ref_freq) {
                ref_freq = freq->old;
                loops_per_jiffy_ref = *lpj;
                tsc_khz_ref = tsc_khz;
        }
        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        mark_tsc_unstable("cpufreq changes");

                set_cyc2ns_scale(tsc_khz, freq->cpu);
        }

        return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        if (!cpu_has_tsc)
                return 0;
        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;
        cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                CPUFREQ_TRANSITION_NOTIFIER);
        return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */

#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)

/*
 * If ART is present detect the numerator:denominator to convert to TSC
 */
static void detect_art(void)
{
        unsigned int unused[2];

        if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
                return;

        cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
              &art_to_tsc_numerator, unused, unused+1);

        /* Don't enable ART in a VM, non-stop TSC required */
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
            !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
            art_to_tsc_denominator < ART_MIN_DENOMINATOR)
                return;

        if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
                return;

        /* Make this sticky over multiple CPU init calls */
        setup_force_cpu_cap(X86_FEATURE_ART);
}
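
/*
 * Background note (per the Intel SDM, not part of the original comments):
 * CPUID leaf 0x15 reports the ART-to-TSC ratio with the denominator in EAX
 * and the numerator in EBX, so TSC_freq = ART_freq * numerator / denominator.
 */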

/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code, by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
        return (cycle_t)rdtsc_ordered();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
        .archdata               = { .vclock_mode = VCLOCK_TSC },
};

void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                clear_sched_clock_stable();
                disable_sched_clock_irqtime();
                pr_info("Marking TSC unstable due to %s\n", reason);
                /* Change only the rating, when not registered */
                if (clocksource_tsc.mult)
                        clocksource_mark_unstable(&clocksource_tsc);
                else {
                        clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
                        clocksource_tsc.rating = 0;
                }
        }
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
        if (is_geode_lx()) {
                /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
                unsigned long res_low, res_high;

                rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
                /* Geode_LX - the OLPC CPU has a very reliable TSC */
                if (res_low & RTSC_SUSP)
                        tsc_clocksource_reliable = 1;
        }
#endif
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
                tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
        if (!cpu_has_tsc || tsc_unstable)
                return 1;

#ifdef CONFIG_SMP
        if (apic_is_clustered_box())
                return 1;
#endif

        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;

        if (tsc_clocksource_reliable)
                return 0;
        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                /* assume multi socket systems are not synchronized: */
                if (num_possible_cpus() > 1)
                        return 1;
        }

        return 0;
}

/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
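/*
 * Note (added commentary): the multiplication below is split into the
 * quotient and remainder of the division by the denominator so that
 * ART * numerator cannot overflow 64 bits; the result is
 * TSC = ART * numerator / denominator + offset.
 */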
struct system_counterval_t convert_art_to_tsc(cycle_t art)
{
        u64 tmp, res, rem;

        rem = do_div(art, art_to_tsc_denominator);

        res = art * art_to_tsc_numerator;
        tmp = rem * art_to_tsc_numerator;

        do_div(tmp, art_to_tsc_denominator);
        res += tmp + art_to_tsc_offset;

        return (struct system_counterval_t) {.cs = art_related_clocksource,
                        .cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);

static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);

/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the fast early
 * calibration, we throw out the new calibration and use the
 * early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
        static u64 tsc_start = -1, ref_start;
        static int hpet;
        u64 tsc_stop, ref_stop, delta;
        unsigned long freq;

        /* Don't bother refining TSC on unstable systems */
        if (check_tsc_unstable())
                goto out;

        /*
         * Since the work is started early in boot, we may be
         * delayed the first time we expire. So set the workqueue
         * again once we know timers are working.
         */
        if (tsc_start == -1) {
                /*
                 * Only set hpet once, to avoid mixing hardware
                 * if the hpet becomes enabled later.
                 */
                hpet = is_hpet_enabled();
                schedule_delayed_work(&tsc_irqwork, HZ);
                tsc_start = tsc_read_refs(&ref_start, hpet);
                return;
        }

        tsc_stop = tsc_read_refs(&ref_stop, hpet);

        /* hpet or pmtimer available ? */
        if (ref_start == ref_stop)
                goto out;

        /* Check whether the sampling was disturbed by an SMI */
        if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
                goto out;

        delta = tsc_stop - tsc_start;
        delta *= 1000000LL;
        if (hpet)
                freq = calc_hpet_ref(delta, ref_start, ref_stop);
        else
                freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

        /* Make sure we're within 1% */
        if (abs(tsc_khz - freq) > tsc_khz/100)
                goto out;

        tsc_khz = freq;
        pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
                (unsigned long)tsc_khz / 1000,
                (unsigned long)tsc_khz % 1000);

out:
        if (boot_cpu_has(X86_FEATURE_ART))
                art_related_clocksource = &clocksource_tsc;
        clocksource_register_khz(&clocksource_tsc, tsc_khz);
}

static int __init init_tsc_clocksource(void)
{
        if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
                return 0;

        if (tsc_clocksource_reliable)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        }

        if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
                clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

        /*
         * Trust the results of the earlier calibration on systems
         * exporting a reliable TSC.
         */
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
                clocksource_register_khz(&clocksource_tsc, tsc_khz);
                return 0;
        }

        schedule_delayed_work(&tsc_irqwork, 0);
        return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

void __init tsc_init(void)
{
        u64 lpj;
        int cpu;

        if (!cpu_has_tsc) {
                setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
                return;
        }

        tsc_khz = x86_platform.calibrate_tsc();
        cpu_khz = tsc_khz;

        if (!tsc_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
                setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
                return;
        }

        pr_info("Detected %lu.%03lu MHz processor\n",
                (unsigned long)cpu_khz / 1000,
                (unsigned long)cpu_khz % 1000);

        /*
         * Secondary CPUs do not run through tsc_init(), so set up
         * all the scale factors for all CPUs, assuming the same
         * speed as the bootup CPU. (cpufreq notifiers will fix this
         * up if their speed diverges)
         */
        for_each_possible_cpu(cpu) {
                cyc2ns_init(cpu);
                set_cyc2ns_scale(cpu_khz, cpu);
        }

        if (tsc_disabled > 0)
                return;

        /* now allow native_sched_clock() to use rdtsc */

        tsc_disabled = 0;
        static_branch_enable(&__use_tsc);

        if (!no_sched_irq_time)
                enable_sched_clock_irqtime();

        lpj = ((u64)tsc_khz * 1000);
        do_div(lpj, HZ);
        lpj_fine = lpj;

        use_tsc_delay();

        if (unsynchronized_tsc())
                mark_tsc_unstable("TSCs unsynchronized");

        check_system_tsc_reliable();

        detect_art();
}

#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
        int sibling, cpu = smp_processor_id();
        struct cpumask *mask = topology_core_cpumask(cpu);

        if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
                return 0;

        if (!mask)
                return 0;

        sibling = cpumask_any_but(mask, cpu);
        if (sibling < nr_cpu_ids)
                return cpu_data(sibling).loops_per_jiffy;
        return 0;
}
#endif