timer.c

  1. /*
  2. * linux/kernel/timer.c
  3. *
  4. * Kernel internal timers, kernel timekeeping, basic process system calls
  5. *
  6. * Copyright (C) 1991, 1992 Linus Torvalds
  7. *
  8. * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
  9. *
  10. * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
  11. * "A Kernel Model for Precision Timekeeping" by Dave Mills
  12. * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
  13. * serialize accesses to xtime/lost_ticks).
  14. * Copyright (C) 1998 Andrea Arcangeli
  15. * 1999-03-10 Improved NTP compatibility by Ulrich Windl
  16. * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
  17. * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
  18. * Copyright (C) 2000, 2001, 2002 Ingo Molnar
  19. * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
  20. */
  21. #include <linux/kernel_stat.h>
  22. #include <linux/module.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/percpu.h>
  25. #include <linux/init.h>
  26. #include <linux/mm.h>
  27. #include <linux/swap.h>
  28. #include <linux/notifier.h>
  29. #include <linux/thread_info.h>
  30. #include <linux/time.h>
  31. #include <linux/jiffies.h>
  32. #include <linux/posix-timers.h>
  33. #include <linux/cpu.h>
  34. #include <linux/syscalls.h>
  35. #include <linux/delay.h>
  36. #include <asm/uaccess.h>
  37. #include <asm/unistd.h>
  38. #include <asm/div64.h>
  39. #include <asm/timex.h>
  40. #include <asm/io.h>
  41. u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
  42. EXPORT_SYMBOL(jiffies_64);
  43. /*
  44. * per-CPU timer vector definitions:
  45. */
  46. #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
  47. #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
  48. #define TVN_SIZE (1 << TVN_BITS)
  49. #define TVR_SIZE (1 << TVR_BITS)
  50. #define TVN_MASK (TVN_SIZE - 1)
  51. #define TVR_MASK (TVR_SIZE - 1)
  52. typedef struct tvec_s {
  53. struct list_head vec[TVN_SIZE];
  54. } tvec_t;
  55. typedef struct tvec_root_s {
  56. struct list_head vec[TVR_SIZE];
  57. } tvec_root_t;
  58. struct tvec_t_base_s {
  59. spinlock_t lock;
  60. struct timer_list *running_timer;
  61. unsigned long timer_jiffies;
  62. tvec_root_t tv1;
  63. tvec_t tv2;
  64. tvec_t tv3;
  65. tvec_t tv4;
  66. tvec_t tv5;
  67. } ____cacheline_aligned_in_smp;
  68. typedef struct tvec_t_base_s tvec_base_t;
  69. tvec_base_t boot_tvec_bases;
  70. EXPORT_SYMBOL(boot_tvec_bases);
  71. static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
  72. /**
  73. * __round_jiffies - function to round jiffies to a full second
  74. * @j: the time in (absolute) jiffies that should be rounded
  75. * @cpu: the processor number on which the timeout will happen
  76. *
  77. * __round_jiffies() rounds an absolute time in the future (in jiffies)
  78. * up or down to (approximately) full seconds. This is useful for timers
  79. * for which the exact time they fire does not matter too much, as long as
  80. * they fire approximately every X seconds.
  81. *
  82. * By rounding these timers to whole seconds, all such timers will fire
  83. * at the same time, rather than at various times spread out. The goal
  84. * of this is to have the CPU wake up less, which saves power.
  85. *
  86. * The exact rounding is skewed for each processor to avoid all
  87. * processors firing at the exact same time, which could lead
  88. * to lock contention or spurious cache line bouncing.
  89. *
  90. * The return value is the rounded version of the @j parameter.
  91. */
  92. unsigned long __round_jiffies(unsigned long j, int cpu)
  93. {
  94. int rem;
  95. unsigned long original = j;
  96. /*
  97. * We don't want all cpus firing their timers at once hitting the
  98. * same lock or cachelines, so we skew each extra cpu with an extra
  99. * 3 jiffies. The 3-jiffy value came originally from the mm/ code, which
  100. * already did this.
  101. * The skew is done by adding 3*cpunr, then round, then subtract this
  102. * extra offset again.
  103. */
  104. j += cpu * 3;
  105. rem = j % HZ;
  106. /*
  107. * If the target jiffie is just after a whole second (which can happen
  108. * due to delays of the timer irq, long irq off times etc etc) then
  109. * we should round down to the whole second, not up. Use 1/4th second
  110. * as cutoff for this rounding as an extreme upper bound for this.
  111. */
  112. if (rem < HZ/4) /* round down */
  113. j = j - rem;
  114. else /* round up */
  115. j = j - rem + HZ;
  116. /* now that we have rounded, subtract the extra skew again */
  117. j -= cpu * 3;
  118. if (j <= jiffies) /* rounding ate our timeout entirely; */
  119. return original;
  120. return j;
  121. }
  122. EXPORT_SYMBOL_GPL(__round_jiffies);
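/*
 * Worked example (illustrative, not part of the original file): with
 * HZ == 250 and cpu == 1, rounding an absolute value j == 1003 goes:
 *
 *	j += 3;			// skew for cpu 1       -> 1006
 *	rem = 1006 % 250;	// -> 6, well below HZ/4 (62)
 *	j = 1006 - 6;		// round down to the whole second -> 1000
 *	j -= 3;			// remove the skew again -> 997
 *
 * so cpu 1 fires 3 jiffies before the full-second boundary while cpu 0
 * fires exactly on it. The numbers are made up for illustration.
 */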
  123. /**
  124. * __round_jiffies_relative - function to round jiffies to a full second
  125. * @j: the time in (relative) jiffies that should be rounded
  126. * @cpu: the processor number on which the timeout will happen
  127. *
  128. * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
  129. * up or down to (approximately) full seconds. This is useful for timers
  130. * for which the exact time they fire does not matter too much, as long as
  131. * they fire approximately every X seconds.
  132. *
  133. * By rounding these timers to whole seconds, all such timers will fire
  134. * at the same time, rather than at various times spread out. The goal
  135. * of this is to have the CPU wake up less, which saves power.
  136. *
  137. * The exact rounding is skewed for each processor to avoid all
  138. * processors firing at the exact same time, which could lead
  139. * to lock contention or spurious cache line bouncing.
  140. *
  141. * The return value is the rounded version of the @j parameter.
  142. */
  143. unsigned long __round_jiffies_relative(unsigned long j, int cpu)
  144. {
  145. /*
  146. * In theory the following code can skip a jiffy in case jiffies
  147. * increments right between the addition and the later subtraction.
  148. * However since the entire point of this function is to use approximate
  149. * timeouts, it's entirely ok to not handle that.
  150. */
  151. return __round_jiffies(j + jiffies, cpu) - jiffies;
  152. }
  153. EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  154. /**
  155. * round_jiffies - function to round jiffies to a full second
  156. * @j: the time in (absolute) jiffies that should be rounded
  157. *
  158. * round_jiffies() rounds an absolute time in the future (in jiffies)
  159. * up or down to (approximately) full seconds. This is useful for timers
  160. * for which the exact time they fire does not matter too much, as long as
  161. * they fire approximately every X seconds.
  162. *
  163. * By rounding these timers to whole seconds, all such timers will fire
  164. * at the same time, rather than at various times spread out. The goal
  165. * of this is to have the CPU wake up less, which saves power.
  166. *
  167. * The return value is the rounded version of the @j parameter.
  168. */
  169. unsigned long round_jiffies(unsigned long j)
  170. {
  171. return __round_jiffies(j, raw_smp_processor_id());
  172. }
  173. EXPORT_SYMBOL_GPL(round_jiffies);
  174. /**
  175. * round_jiffies_relative - function to round jiffies to a full second
  176. * @j: the time in (relative) jiffies that should be rounded
  177. *
  178. * round_jiffies_relative() rounds a time delta in the future (in jiffies)
  179. * up or down to (approximately) full seconds. This is useful for timers
  180. * for which the exact time they fire does not matter too much, as long as
  181. * they fire approximately every X seconds.
  182. *
  183. * By rounding these timers to whole seconds, all such timers will fire
  184. * at the same time, rather than at various times spread out. The goal
  185. * of this is to have the CPU wake up less, which saves power.
  186. *
  187. * The return value is the rounded version of the @j parameter.
  188. */
  189. unsigned long round_jiffies_relative(unsigned long j)
  190. {
  191. return __round_jiffies_relative(j, raw_smp_processor_id());
  192. }
  193. EXPORT_SYMBOL_GPL(round_jiffies_relative);
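/*
 * Usage sketch (illustrative; my_work_timer and its handler are
 * hypothetical, not defined in this file): a driver that only needs a
 * tick "roughly every 2 seconds" can batch its wakeups with other such
 * timers:
 *
 *	static void my_work_fn(unsigned long data)
 *	{
 *		// ... do periodic housekeeping ...
 *		mod_timer(&my_work_timer,
 *			  jiffies + round_jiffies_relative(2 * HZ));
 *	}
 */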
  194. static inline void set_running_timer(tvec_base_t *base,
  195. struct timer_list *timer)
  196. {
  197. #ifdef CONFIG_SMP
  198. base->running_timer = timer;
  199. #endif
  200. }
  201. static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
  202. {
  203. unsigned long expires = timer->expires;
  204. unsigned long idx = expires - base->timer_jiffies;
  205. struct list_head *vec;
  206. if (idx < TVR_SIZE) {
  207. int i = expires & TVR_MASK;
  208. vec = base->tv1.vec + i;
  209. } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
  210. int i = (expires >> TVR_BITS) & TVN_MASK;
  211. vec = base->tv2.vec + i;
  212. } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
  213. int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
  214. vec = base->tv3.vec + i;
  215. } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
  216. int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
  217. vec = base->tv4.vec + i;
  218. } else if ((signed long) idx < 0) {
  219. /*
  220. * Can happen if you add a timer with expires == jiffies,
  221. * or you set a timer to go off in the past
  222. */
  223. vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
  224. } else {
  225. int i;
  226. /* If the timeout is larger than 0xffffffff on 64-bit
  227. * architectures then we use the maximum timeout:
  228. */
  229. if (idx > 0xffffffffUL) {
  230. idx = 0xffffffffUL;
  231. expires = idx + base->timer_jiffies;
  232. }
  233. i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
  234. vec = base->tv5.vec + i;
  235. }
  236. /*
  237. * Timers are FIFO:
  238. */
  239. list_add_tail(&timer->entry, vec);
  240. }
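/*
 * Bucket selection, worked through (illustrative, assuming the
 * !CONFIG_BASE_SMALL sizes TVR_BITS == 8, TVN_BITS == 6): a timer whose
 * expiry lies idx = expires - base->timer_jiffies jiffies in the future
 * lands in
 *
 *	idx < 2^8  (256)	tv1, slot  expires        & 255
 *	idx < 2^14 (16384)	tv2, slot (expires >> 8)  & 63
 *	idx < 2^20		tv3, slot (expires >> 14) & 63
 *	idx < 2^26		tv4, slot (expires >> 20) & 63
 *	otherwise		tv5, slot (expires >> 26) & 63
 *
 * e.g. idx == 5000 goes into tv2, slot (expires >> 8) & 63.
 */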
  241. /**
  242. * init_timer - initialize a timer.
  243. * @timer: the timer to be initialized
  244. *
  245. * init_timer() must be done to a timer prior to calling *any* of the
  246. * other timer functions.
  247. */
  248. void fastcall init_timer(struct timer_list *timer)
  249. {
  250. timer->entry.next = NULL;
  251. timer->base = __raw_get_cpu_var(tvec_bases);
  252. }
  253. EXPORT_SYMBOL(init_timer);
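/*
 * Usage sketch (illustrative; my_timer/my_timer_fn are hypothetical):
 * every timer must be initialized before any other timer call is made
 * on it:
 *
 *	static struct timer_list my_timer;
 *
 *	static void my_timer_fn(unsigned long data) { ... }
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_timer_fn;
 *	my_timer.data     = 0;
 *	my_timer.expires  = jiffies + HZ;
 *	add_timer(&my_timer);
 *
 * setup_timer() (see its use in schedule_timeout() below) combines the
 * init_timer(), ->function and ->data steps.
 */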
  254. static inline void detach_timer(struct timer_list *timer,
  255. int clear_pending)
  256. {
  257. struct list_head *entry = &timer->entry;
  258. __list_del(entry->prev, entry->next);
  259. if (clear_pending)
  260. entry->next = NULL;
  261. entry->prev = LIST_POISON2;
  262. }
  263. /*
  264. * We are using hashed locking: holding per_cpu(tvec_bases).lock
  265. * means that all timers which are tied to this base via timer->base are
  266. * locked, and the base itself is locked too.
  267. *
  268. * So __run_timers/migrate_timers can safely modify all timers which could
  269. * be found on ->tvX lists.
  270. *
  271. * When the timer's base is locked, and the timer removed from list, it is
  272. * possible to set timer->base = NULL and drop the lock: the timer remains
  273. * locked.
  274. */
  275. static tvec_base_t *lock_timer_base(struct timer_list *timer,
  276. unsigned long *flags)
  277. __acquires(timer->base->lock)
  278. {
  279. tvec_base_t *base;
  280. for (;;) {
  281. base = timer->base;
  282. if (likely(base != NULL)) {
  283. spin_lock_irqsave(&base->lock, *flags);
  284. if (likely(base == timer->base))
  285. return base;
  286. /* The timer has migrated to another CPU */
  287. spin_unlock_irqrestore(&base->lock, *flags);
  288. }
  289. cpu_relax();
  290. }
  291. }
  292. int __mod_timer(struct timer_list *timer, unsigned long expires)
  293. {
  294. tvec_base_t *base, *new_base;
  295. unsigned long flags;
  296. int ret = 0;
  297. BUG_ON(!timer->function);
  298. base = lock_timer_base(timer, &flags);
  299. if (timer_pending(timer)) {
  300. detach_timer(timer, 0);
  301. ret = 1;
  302. }
  303. new_base = __get_cpu_var(tvec_bases);
  304. if (base != new_base) {
  305. /*
  306. * We are trying to schedule the timer on the local CPU.
  307. * However we can't change timer's base while it is running,
  308. * otherwise del_timer_sync() can't detect that the timer's
  309. * handler has not yet finished. This also guarantees that
  310. * the timer is serialized wrt itself.
  311. */
  312. if (likely(base->running_timer != timer)) {
  313. /* See the comment in lock_timer_base() */
  314. timer->base = NULL;
  315. spin_unlock(&base->lock);
  316. base = new_base;
  317. spin_lock(&base->lock);
  318. timer->base = base;
  319. }
  320. }
  321. timer->expires = expires;
  322. internal_add_timer(base, timer);
  323. spin_unlock_irqrestore(&base->lock, flags);
  324. return ret;
  325. }
  326. EXPORT_SYMBOL(__mod_timer);
  327. /**
  328. * add_timer_on - start a timer on a particular CPU
  329. * @timer: the timer to be added
  330. * @cpu: the CPU to start it on
  331. *
  332. * This is not very scalable on SMP. Double adds are not possible.
  333. */
  334. void add_timer_on(struct timer_list *timer, int cpu)
  335. {
  336. tvec_base_t *base = per_cpu(tvec_bases, cpu);
  337. unsigned long flags;
  338. BUG_ON(timer_pending(timer) || !timer->function);
  339. spin_lock_irqsave(&base->lock, flags);
  340. timer->base = base;
  341. internal_add_timer(base, timer);
  342. spin_unlock_irqrestore(&base->lock, flags);
  343. }
  344. /**
  345. * mod_timer - modify a timer's timeout
  346. * @timer: the timer to be modified
  347. * @expires: new timeout in jiffies
  348. *
  349. * mod_timer() is a more efficient way to update the expire field of an
  350. * active timer (if the timer is inactive it will be activated)
  351. *
  352. * mod_timer(timer, expires) is equivalent to:
  353. *
  354. * del_timer(timer); timer->expires = expires; add_timer(timer);
  355. *
  356. * Note that if there are multiple unserialized concurrent users of the
  357. * same timer, then mod_timer() is the only safe way to modify the timeout,
  358. * since add_timer() cannot modify an already running timer.
  359. *
  360. * The function returns whether it has modified a pending timer or not.
  361. * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
  362. * active timer returns 1.)
  363. */
  364. int mod_timer(struct timer_list *timer, unsigned long expires)
  365. {
  366. BUG_ON(!timer->function);
  367. /*
  368. * This is a common optimization triggered by the
  369. * networking code - if the timer is re-modified
  370. * to be the same thing then just return:
  371. */
  372. if (timer->expires == expires && timer_pending(timer))
  373. return 1;
  374. return __mod_timer(timer, expires);
  375. }
  376. EXPORT_SYMBOL(mod_timer);
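/*
 * Usage sketch (illustrative; wd_timer is hypothetical): mod_timer() is
 * the usual way to (re)arm a watchdog-style timer from fast paths, since
 * it works whether or not the timer is currently pending:
 *
 *	// postpone the watchdog by another half second
 *	mod_timer(&wd_timer, jiffies + HZ / 2);
 *
 * If the timer is already pending with the same expiry, the early return
 * above avoids taking the base lock at all.
 */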
  377. /**
  378. * del_timer - deactivate a timer.
  379. * @timer: the timer to be deactivated
  380. *
  381. * del_timer() deactivates a timer - this works on both active and inactive
  382. * timers.
  383. *
  384. * The function returns whether it has deactivated a pending timer or not.
  385. * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
  386. * active timer returns 1.)
  387. */
  388. int del_timer(struct timer_list *timer)
  389. {
  390. tvec_base_t *base;
  391. unsigned long flags;
  392. int ret = 0;
  393. if (timer_pending(timer)) {
  394. base = lock_timer_base(timer, &flags);
  395. if (timer_pending(timer)) {
  396. detach_timer(timer, 1);
  397. ret = 1;
  398. }
  399. spin_unlock_irqrestore(&base->lock, flags);
  400. }
  401. return ret;
  402. }
  403. EXPORT_SYMBOL(del_timer);
  404. #ifdef CONFIG_SMP
  405. /**
  406. * try_to_del_timer_sync - Try to deactivate a timer
  407. * @timer: the timer to deactivate
  408. *
  409. * This function tries to deactivate a timer. Upon successful (ret >= 0)
  410. * exit the timer is not queued and the handler is not running on any CPU.
  411. *
  412. * It must not be called from interrupt contexts.
  413. */
  414. int try_to_del_timer_sync(struct timer_list *timer)
  415. {
  416. tvec_base_t *base;
  417. unsigned long flags;
  418. int ret = -1;
  419. base = lock_timer_base(timer, &flags);
  420. if (base->running_timer == timer)
  421. goto out;
  422. ret = 0;
  423. if (timer_pending(timer)) {
  424. detach_timer(timer, 1);
  425. ret = 1;
  426. }
  427. out:
  428. spin_unlock_irqrestore(&base->lock, flags);
  429. return ret;
  430. }
  431. /**
  432. * del_timer_sync - deactivate a timer and wait for the handler to finish.
  433. * @timer: the timer to be deactivated
  434. *
  435. * This function only differs from del_timer() on SMP: besides deactivating
  436. * the timer it also makes sure the handler has finished executing on other
  437. * CPUs.
  438. *
  439. * Synchronization rules: Callers must prevent restarting of the timer,
  440. * otherwise this function is meaningless. It must not be called from
  441. * interrupt contexts. The caller must not hold locks which would prevent
  442. * completion of the timer's handler. The timer's handler must not call
  443. * add_timer_on(). Upon exit the timer is not queued and the handler is
  444. * not running on any CPU.
  445. *
  446. * The function returns whether it has deactivated a pending timer or not.
  447. */
  448. int del_timer_sync(struct timer_list *timer)
  449. {
  450. for (;;) {
  451. int ret = try_to_del_timer_sync(timer);
  452. if (ret >= 0)
  453. return ret;
  454. cpu_relax();
  455. }
  456. }
  457. EXPORT_SYMBOL(del_timer_sync);
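/*
 * Teardown sketch (illustrative; my_dev is hypothetical): the classic
 * shutdown sequence first prevents the timer from being re-armed, then
 * waits for a possibly running handler to finish:
 *
 *	my_dev->shutting_down = 1;	// handler checks this before re-arming
 *	del_timer_sync(&my_dev->timer);
 *
 * The caller must not hold any lock the handler might take, and must not
 * call this from the handler itself, or it can spin forever.
 */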
  458. #endif
  459. static int cascade(tvec_base_t *base, tvec_t *tv, int index)
  460. {
  461. /* cascade all the timers from tv up one level */
  462. struct timer_list *timer, *tmp;
  463. struct list_head tv_list;
  464. list_replace_init(tv->vec + index, &tv_list);
  465. /*
  466. * We are removing _all_ timers from the list, so we
  467. * don't have to detach them individually.
  468. */
  469. list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
  470. BUG_ON(timer->base != base);
  471. internal_add_timer(base, timer);
  472. }
  473. return index;
  474. }
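/*
 * Cascade example (illustrative): each time base->timer_jiffies rolls
 * over a multiple of 256 (TVR_SIZE), the tv1 index in __run_timers()
 * becomes 0 and one tv2 slot, INDEX(0) == (timer_jiffies >> 8) & 63, is
 * emptied; its timers are re-hashed by internal_add_timer(), now close
 * enough to land in tv1. Every 2^14 jiffies the same happens one level
 * up from tv3 into tv2, and so on.
 */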
  475. #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
  476. /**
  477. * __run_timers - run all expired timers (if any) on this CPU.
  478. * @base: the timer vector to be processed.
  479. *
  480. * This function cascades all vectors and executes all expired timer
  481. * vectors.
  482. */
  483. static inline void __run_timers(tvec_base_t *base)
  484. {
  485. struct timer_list *timer;
  486. spin_lock_irq(&base->lock);
  487. while (time_after_eq(jiffies, base->timer_jiffies)) {
  488. struct list_head work_list;
  489. struct list_head *head = &work_list;
  490. int index = base->timer_jiffies & TVR_MASK;
  491. /*
  492. * Cascade timers:
  493. */
  494. if (!index &&
  495. (!cascade(base, &base->tv2, INDEX(0))) &&
  496. (!cascade(base, &base->tv3, INDEX(1))) &&
  497. !cascade(base, &base->tv4, INDEX(2)))
  498. cascade(base, &base->tv5, INDEX(3));
  499. ++base->timer_jiffies;
  500. list_replace_init(base->tv1.vec + index, &work_list);
  501. while (!list_empty(head)) {
  502. void (*fn)(unsigned long);
  503. unsigned long data;
  504. timer = list_entry(head->next,struct timer_list,entry);
  505. fn = timer->function;
  506. data = timer->data;
  507. set_running_timer(base, timer);
  508. detach_timer(timer, 1);
  509. spin_unlock_irq(&base->lock);
  510. {
  511. int preempt_count = preempt_count();
  512. fn(data);
  513. if (preempt_count != preempt_count()) {
  514. printk(KERN_WARNING "huh, entered %p "
  515. "with preempt_count %08x, exited"
  516. " with %08x?\n",
  517. fn, preempt_count,
  518. preempt_count());
  519. BUG();
  520. }
  521. }
  522. spin_lock_irq(&base->lock);
  523. }
  524. }
  525. set_running_timer(base, NULL);
  526. spin_unlock_irq(&base->lock);
  527. }
  528. #ifdef CONFIG_NO_IDLE_HZ
  529. /*
  530. * Find out when the next timer event is due to happen. This
  531. * is used on S/390 to stop all activity when a cpu is idle.
  532. * This function needs to be called with interrupts disabled.
  533. */
  534. unsigned long next_timer_interrupt(void)
  535. {
  536. tvec_base_t *base;
  537. struct list_head *list;
  538. struct timer_list *nte;
  539. unsigned long expires;
  540. unsigned long hr_expires = MAX_JIFFY_OFFSET;
  541. ktime_t hr_delta;
  542. tvec_t *varray[4];
  543. int i, j;
  544. hr_delta = hrtimer_get_next_event();
  545. if (hr_delta.tv64 != KTIME_MAX) {
  546. struct timespec tsdelta;
  547. tsdelta = ktime_to_timespec(hr_delta);
  548. hr_expires = timespec_to_jiffies(&tsdelta);
  549. if (hr_expires < 3)
  550. return hr_expires + jiffies;
  551. }
  552. hr_expires += jiffies;
  553. base = __get_cpu_var(tvec_bases);
  554. spin_lock(&base->lock);
  555. expires = base->timer_jiffies + (LONG_MAX >> 1);
  556. list = NULL;
  557. /* Look for timer events in tv1. */
  558. j = base->timer_jiffies & TVR_MASK;
  559. do {
  560. list_for_each_entry(nte, base->tv1.vec + j, entry) {
  561. expires = nte->expires;
  562. if (j < (base->timer_jiffies & TVR_MASK))
  563. list = base->tv2.vec + (INDEX(0));
  564. goto found;
  565. }
  566. j = (j + 1) & TVR_MASK;
  567. } while (j != (base->timer_jiffies & TVR_MASK));
  568. /* Check tv2-tv5. */
  569. varray[0] = &base->tv2;
  570. varray[1] = &base->tv3;
  571. varray[2] = &base->tv4;
  572. varray[3] = &base->tv5;
  573. for (i = 0; i < 4; i++) {
  574. j = INDEX(i);
  575. do {
  576. if (list_empty(varray[i]->vec + j)) {
  577. j = (j + 1) & TVN_MASK;
  578. continue;
  579. }
  580. list_for_each_entry(nte, varray[i]->vec + j, entry)
  581. if (time_before(nte->expires, expires))
  582. expires = nte->expires;
  583. if (j < (INDEX(i)) && i < 3)
  584. list = varray[i + 1]->vec + (INDEX(i + 1));
  585. goto found;
  586. } while (j != (INDEX(i)));
  587. }
  588. found:
  589. if (list) {
  590. /*
  591. * The search wrapped. We need to look at the next list
  592. * from next tv element that would cascade into tv element
  593. * where we found the timer element.
  594. */
  595. list_for_each_entry(nte, list, entry) {
  596. if (time_before(nte->expires, expires))
  597. expires = nte->expires;
  598. }
  599. }
  600. spin_unlock(&base->lock);
  601. /*
  602. * It can happen that other CPUs service timer IRQs and increment
  603. * jiffies, but we have not yet got a local timer tick to process
  604. * the timer wheels. In that case, the expiry time can be before
  605. * jiffies, but since the high-resolution timer here is relative to
  606. * jiffies, the default expression when high-resolution timers are
  607. * not active,
  608. *
  609. * time_before(MAX_JIFFY_OFFSET + jiffies, expires)
  610. *
  611. * would falsely evaluate to true. If that is the case, just
  612. * return jiffies so that we can immediately fire the local timer
  613. */
  614. if (time_before(expires, jiffies))
  615. return jiffies;
  616. if (time_before(hr_expires, expires))
  617. return hr_expires;
  618. return expires;
  619. }
  620. #endif
  621. /******************************************************************/
  622. /*
  623. * The current time
  624. * wall_to_monotonic is what we need to add to xtime (or xtime corrected
  625. * for sub jiffie times) to get to monotonic time. Monotonic is pegged
  626. * at zero at system boot time, so wall_to_monotonic will be negative,
  627. * however, we will ALWAYS keep the tv_nsec part positive so we can use
  628. * the usual normalization.
  629. */
  630. struct timespec xtime __attribute__ ((aligned (16)));
  631. struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
  632. EXPORT_SYMBOL(xtime);
  633. /* XXX - all of this timekeeping code should be later moved to time.c */
  634. #include <linux/clocksource.h>
  635. static struct clocksource *clock; /* pointer to current clocksource */
  636. #ifdef CONFIG_GENERIC_TIME
  637. /**
  638. * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
  639. *
  640. * private function, must hold xtime_lock lock when being
  641. * called. Returns the number of nanoseconds since the
  642. * last call to update_wall_time() (adjusted by NTP scaling)
  643. */
  644. static inline s64 __get_nsec_offset(void)
  645. {
  646. cycle_t cycle_now, cycle_delta;
  647. s64 ns_offset;
  648. /* read clocksource: */
  649. cycle_now = clocksource_read(clock);
  650. /* calculate the delta since the last update_wall_time: */
  651. cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
  652. /* convert to nanoseconds: */
  653. ns_offset = cyc2ns(clock, cycle_delta);
  654. return ns_offset;
  655. }
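/*
 * cyc2ns() sketch (illustrative numbers): the conversion is a fixed-point
 * multiply, ns = (cycle_delta * clock->mult) >> clock->shift. For a 1 MHz
 * clocksource (1000 ns per cycle) registered with shift == 10, mult would
 * be roughly 1000 << 10 == 1024000, so a delta of 5 cycles yields
 * (5 * 1024000) >> 10 == 5000 ns.
 */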
  656. /**
  657. * __get_realtime_clock_ts - Returns the time of day in a timespec
  658. * @ts: pointer to the timespec to be set
  659. *
  660. * Returns the time of day in a timespec. Used by
  661. * do_gettimeofday() and get_realtime_clock_ts().
  662. */
  663. static inline void __get_realtime_clock_ts(struct timespec *ts)
  664. {
  665. unsigned long seq;
  666. s64 nsecs;
  667. do {
  668. seq = read_seqbegin(&xtime_lock);
  669. *ts = xtime;
  670. nsecs = __get_nsec_offset();
  671. } while (read_seqretry(&xtime_lock, seq));
  672. timespec_add_ns(ts, nsecs);
  673. }
  674. /**
  675. * getnstimeofday - Returns the time of day in a timespec
  676. * @ts: pointer to the timespec to be set
  677. *
  678. * Returns the time of day in a timespec.
  679. */
  680. void getnstimeofday(struct timespec *ts)
  681. {
  682. __get_realtime_clock_ts(ts);
  683. }
  684. EXPORT_SYMBOL(getnstimeofday);
  685. /**
  686. * do_gettimeofday - Returns the time of day in a timeval
  687. * @tv: pointer to the timeval to be set
  688. *
  689. * NOTE: Users should be converted to using get_realtime_clock_ts()
  690. */
  691. void do_gettimeofday(struct timeval *tv)
  692. {
  693. struct timespec now;
  694. __get_realtime_clock_ts(&now);
  695. tv->tv_sec = now.tv_sec;
  696. tv->tv_usec = now.tv_nsec/1000;
  697. }
  698. EXPORT_SYMBOL(do_gettimeofday);
  699. /**
  700. * do_settimeofday - Sets the time of day
  701. * @tv: pointer to the timespec variable containing the new time
  702. *
  703. * Sets the time of day to the new time and update NTP and notify hrtimers
  704. */
  705. int do_settimeofday(struct timespec *tv)
  706. {
  707. unsigned long flags;
  708. time_t wtm_sec, sec = tv->tv_sec;
  709. long wtm_nsec, nsec = tv->tv_nsec;
  710. if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
  711. return -EINVAL;
  712. write_seqlock_irqsave(&xtime_lock, flags);
  713. nsec -= __get_nsec_offset();
  714. wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
  715. wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
  716. set_normalized_timespec(&xtime, sec, nsec);
  717. set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
  718. clock->error = 0;
  719. ntp_clear();
  720. write_sequnlock_irqrestore(&xtime_lock, flags);
  721. /* signal hrtimers about time change */
  722. clock_was_set();
  723. return 0;
  724. }
  725. EXPORT_SYMBOL(do_settimeofday);
  726. /**
  727. * change_clocksource - Swaps clocksources if a new one is available
  728. *
  729. * Accumulates current time interval and initializes new clocksource
  730. */
  731. static int change_clocksource(void)
  732. {
  733. struct clocksource *new;
  734. cycle_t now;
  735. u64 nsec;
  736. new = clocksource_get_next();
  737. if (clock != new) {
  738. now = clocksource_read(new);
  739. nsec = __get_nsec_offset();
  740. timespec_add_ns(&xtime, nsec);
  741. clock = new;
  742. clock->cycle_last = now;
  743. printk(KERN_INFO "Time: %s clocksource has been installed.\n",
  744. clock->name);
  745. return 1;
  746. } else if (clock->update_callback) {
  747. return clock->update_callback();
  748. }
  749. return 0;
  750. }
  751. #else
  752. static inline int change_clocksource(void)
  753. {
  754. return 0;
  755. }
  756. #endif
  757. /**
  758. * timekeeping_is_continuous - check whether timekeeping is free running
  759. */
  760. int timekeeping_is_continuous(void)
  761. {
  762. unsigned long seq;
  763. int ret;
  764. do {
  765. seq = read_seqbegin(&xtime_lock);
  766. ret = clock->flags & CLOCK_SOURCE_IS_CONTINUOUS;
  767. } while (read_seqretry(&xtime_lock, seq));
  768. return ret;
  769. }
  770. /**
  771. * read_persistent_clock - Return time in seconds from the persistent clock.
  772. *
  773. * Weak dummy function for arches that do not yet support it.
  774. * Returns seconds from epoch using the battery backed persistent clock.
  775. * Returns zero if unsupported.
  776. *
  777. * XXX - Do be sure to remove it once all arches implement it.
  778. */
  779. unsigned long __attribute__((weak)) read_persistent_clock(void)
  780. {
  781. return 0;
  782. }
  783. /*
  784. * timekeeping_init - Initializes the clocksource and common timekeeping values
  785. */
  786. void __init timekeeping_init(void)
  787. {
  788. unsigned long flags;
  789. unsigned long sec = read_persistent_clock();
  790. write_seqlock_irqsave(&xtime_lock, flags);
  791. ntp_clear();
  792. clock = clocksource_get_next();
  793. clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
  794. clock->cycle_last = clocksource_read(clock);
  795. xtime.tv_sec = sec;
  796. xtime.tv_nsec = 0;
  797. set_normalized_timespec(&wall_to_monotonic,
  798. -xtime.tv_sec, -xtime.tv_nsec);
  799. write_sequnlock_irqrestore(&xtime_lock, flags);
  800. }
  801. /* flag for if timekeeping is suspended */
  802. static int timekeeping_suspended;
  803. /* time in seconds when suspend began */
  804. static unsigned long timekeeping_suspend_time;
  805. /**
  806. * timekeeping_resume - Resumes the generic timekeeping subsystem.
  807. * @dev: unused
  808. *
  809. * This is for the generic clocksource timekeeping.
  810. * xtime/wall_to_monotonic/jiffies/etc are
  811. * still managed by arch specific suspend/resume code.
  812. */
  813. static int timekeeping_resume(struct sys_device *dev)
  814. {
  815. unsigned long flags;
  816. unsigned long now = read_persistent_clock();
  817. write_seqlock_irqsave(&xtime_lock, flags);
  818. if (now && (now > timekeeping_suspend_time)) {
  819. unsigned long sleep_length = now - timekeeping_suspend_time;
  820. xtime.tv_sec += sleep_length;
  821. wall_to_monotonic.tv_sec -= sleep_length;
  822. }
  823. /* re-base the last cycle value */
  824. clock->cycle_last = clocksource_read(clock);
  825. clock->error = 0;
  826. timekeeping_suspended = 0;
  827. write_sequnlock_irqrestore(&xtime_lock, flags);
  828. touch_softlockup_watchdog();
  829. hrtimer_notify_resume();
  830. return 0;
  831. }
  832. static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
  833. {
  834. unsigned long flags;
  835. write_seqlock_irqsave(&xtime_lock, flags);
  836. timekeeping_suspended = 1;
  837. timekeeping_suspend_time = read_persistent_clock();
  838. write_sequnlock_irqrestore(&xtime_lock, flags);
  839. return 0;
  840. }
  841. /* sysfs resume/suspend bits for timekeeping */
  842. static struct sysdev_class timekeeping_sysclass = {
  843. .resume = timekeeping_resume,
  844. .suspend = timekeeping_suspend,
  845. set_kset_name("timekeeping"),
  846. };
  847. static struct sys_device device_timer = {
  848. .id = 0,
  849. .cls = &timekeeping_sysclass,
  850. };
  851. static int __init timekeeping_init_device(void)
  852. {
  853. int error = sysdev_class_register(&timekeeping_sysclass);
  854. if (!error)
  855. error = sysdev_register(&device_timer);
  856. return error;
  857. }
  858. device_initcall(timekeeping_init_device);
  859. /*
  860. * If the error is already larger, we look ahead even further
  861. * to compensate for late or lost adjustments.
  862. */
  863. static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
  864. s64 *offset)
  865. {
  866. s64 tick_error, i;
  867. u32 look_ahead, adj;
  868. s32 error2, mult;
  869. /*
  870. * Use the current error value to determine how much to look ahead.
  871. * The larger the error the slower we adjust for it to avoid problems
  872. * with losing too many ticks, otherwise we would overadjust and
  873. * produce an even larger error. The smaller the adjustment the
  874. * faster we try to adjust for it, as lost ticks can do less harm
  875. * here. This is tuned so that an error of about 1 msec is adjusted
  876. * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
  877. */
  878. error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
  879. error2 = abs(error2);
  880. for (look_ahead = 0; error2 > 0; look_ahead++)
  881. error2 >>= 2;
  882. /*
  883. * Now calculate the error in (1 << look_ahead) ticks, but first
  884. * remove the single look ahead already included in the error.
  885. */
  886. tick_error = current_tick_length() >>
  887. (TICK_LENGTH_SHIFT - clock->shift + 1);
  888. tick_error -= clock->xtime_interval >> 1;
  889. error = ((error - tick_error) >> look_ahead) + tick_error;
  890. /* Finally calculate the adjustment shift value. */
  891. i = *interval;
  892. mult = 1;
  893. if (error < 0) {
  894. error = -error;
  895. *interval = -*interval;
  896. *offset = -*offset;
  897. mult = -1;
  898. }
  899. for (adj = 0; error > i; adj++)
  900. error >>= 1;
  901. *interval <<= adj;
  902. *offset <<= adj;
  903. return mult << adj;
  904. }
  905. /*
  906. * Adjust the multiplier to reduce the error value,
  907. * this is optimized for the most common adjustments of -1,0,1,
  908. * for other values we can do a bit more work.
  909. */
  910. static void clocksource_adjust(struct clocksource *clock, s64 offset)
  911. {
  912. s64 error, interval = clock->cycle_interval;
  913. int adj;
  914. error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
  915. if (error > interval) {
  916. error >>= 2;
  917. if (likely(error <= interval))
  918. adj = 1;
  919. else
  920. adj = clocksource_bigadjust(error, &interval, &offset);
  921. } else if (error < -interval) {
  922. error >>= 2;
  923. if (likely(error >= -interval)) {
  924. adj = -1;
  925. interval = -interval;
  926. offset = -offset;
  927. } else
  928. adj = clocksource_bigadjust(error, &interval, &offset);
  929. } else
  930. return;
  931. clock->mult += adj;
  932. clock->xtime_interval += interval;
  933. clock->xtime_nsec -= offset;
  934. clock->error -= (interval - offset) <<
  935. (TICK_LENGTH_SHIFT - clock->shift);
  936. }
  937. /**
  938. * update_wall_time - Uses the current clocksource to increment the wall time
  939. *
  940. * Called from the timer interrupt, must hold a write on xtime_lock.
  941. */
  942. static void update_wall_time(void)
  943. {
  944. cycle_t offset;
  945. /* Make sure we're fully resumed: */
  946. if (unlikely(timekeeping_suspended))
  947. return;
  948. #ifdef CONFIG_GENERIC_TIME
  949. offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
  950. #else
  951. offset = clock->cycle_interval;
  952. #endif
  953. clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
  954. /* normally this loop will run just once, however in the
  955. * case of lost or late ticks, it will accumulate correctly.
  956. */
  957. while (offset >= clock->cycle_interval) {
  958. /* accumulate one interval */
  959. clock->xtime_nsec += clock->xtime_interval;
  960. clock->cycle_last += clock->cycle_interval;
  961. offset -= clock->cycle_interval;
  962. if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
  963. clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
  964. xtime.tv_sec++;
  965. second_overflow();
  966. }
  967. /* interpolator bits */
  968. time_interpolator_update(clock->xtime_interval
  969. >> clock->shift);
  970. /* accumulate error between NTP and clock interval */
  971. clock->error += current_tick_length();
  972. clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
  973. }
  974. /* correct the clock when NTP error is too big */
  975. clocksource_adjust(clock, offset);
  976. /* store full nanoseconds into xtime */
  977. xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
  978. clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
  979. /* check to see if there is a new clocksource to use */
  980. if (change_clocksource()) {
  981. clock->error = 0;
  982. clock->xtime_nsec = 0;
  983. clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
  984. }
  985. }
  986. /*
  987. * Called from the timer interrupt handler to charge one tick to the current
  988. * process. user_tick is 1 if the tick is user time, 0 for system.
  989. */
  990. void update_process_times(int user_tick)
  991. {
  992. struct task_struct *p = current;
  993. int cpu = smp_processor_id();
  994. /* Note: this timer irq context must be accounted for as well. */
  995. if (user_tick)
  996. account_user_time(p, jiffies_to_cputime(1));
  997. else
  998. account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
  999. run_local_timers();
  1000. if (rcu_pending(cpu))
  1001. rcu_check_callbacks(cpu, user_tick);
  1002. scheduler_tick();
  1003. run_posix_cpu_timers(p);
  1004. }
  1005. /*
  1006. * Nr of active tasks - counted in fixed-point numbers
  1007. */
  1008. static unsigned long count_active_tasks(void)
  1009. {
  1010. return nr_active() * FIXED_1;
  1011. }
  1012. /*
  1013. * Hmm.. Changed this, as the GNU make sources (load.c) seems to
  1014. * imply that avenrun[] is the standard name for this kind of thing.
  1015. * Nothing else seems to be standardized: the fractional size etc
  1016. * all seem to differ on different machines.
  1017. *
  1018. * Requires xtime_lock to access.
  1019. */
  1020. unsigned long avenrun[3];
  1021. EXPORT_SYMBOL(avenrun);
  1022. /*
  1023. * calc_load - given tick count, update the avenrun load estimates.
  1024. * This is called while holding a write_lock on xtime_lock.
  1025. */
  1026. static inline void calc_load(unsigned long ticks)
  1027. {
  1028. unsigned long active_tasks; /* fixed-point */
  1029. static int count = LOAD_FREQ;
  1030. count -= ticks;
  1031. if (unlikely(count < 0)) {
  1032. active_tasks = count_active_tasks();
  1033. do {
  1034. CALC_LOAD(avenrun[0], EXP_1, active_tasks);
  1035. CALC_LOAD(avenrun[1], EXP_5, active_tasks);
  1036. CALC_LOAD(avenrun[2], EXP_15, active_tasks);
  1037. count += LOAD_FREQ;
  1038. } while (count < 0);
  1039. }
  1040. }
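/*
 * Load-average math, sketched (constants live in <linux/sched.h>; the
 * values quoted here are approximate and for illustration only):
 * CALC_LOAD() is a fixed-point exponential decay,
 *
 *	load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT;
 *
 * with FIXED_1 == 1 << FSHIFT (2048) and EXP_1/EXP_5/EXP_15 chosen so
 * that, sampled every LOAD_FREQ (about 5 seconds), the three avenrun[]
 * entries decay with 1, 5 and 15 minute time constants.
 */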
  1041. /*
  1042. * This read-write spinlock protects us from races in SMP while
  1043. * playing with xtime and avenrun.
  1044. */
  1045. __attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
  1046. EXPORT_SYMBOL(xtime_lock);
  1047. /*
  1048. * This function runs timers and the timer-tq in bottom half context.
  1049. */
  1050. static void run_timer_softirq(struct softirq_action *h)
  1051. {
  1052. tvec_base_t *base = __get_cpu_var(tvec_bases);
  1053. hrtimer_run_queues();
  1054. if (time_after_eq(jiffies, base->timer_jiffies))
  1055. __run_timers(base);
  1056. }
  1057. /*
  1058. * Called by the local, per-CPU timer interrupt on SMP.
  1059. */
  1060. void run_local_timers(void)
  1061. {
  1062. raise_softirq(TIMER_SOFTIRQ);
  1063. softlockup_tick();
  1064. }
  1065. /*
  1066. * Called by the timer interrupt. xtime_lock must already be taken
  1067. * by the timer IRQ!
  1068. */
  1069. static inline void update_times(unsigned long ticks)
  1070. {
  1071. update_wall_time();
  1072. calc_load(ticks);
  1073. }
  1074. /*
  1075. * The 64-bit jiffies value is not atomic - you MUST NOT read it
  1076. * without sampling the sequence number in xtime_lock.
  1077. * jiffies is defined in the linker script...
  1078. */
  1079. void do_timer(unsigned long ticks)
  1080. {
  1081. jiffies_64 += ticks;
  1082. update_times(ticks);
  1083. }
  1084. #ifdef __ARCH_WANT_SYS_ALARM
  1085. /*
  1086. * For backwards compatibility? This can be done in libc so Alpha
  1087. * and all newer ports shouldn't need it.
  1088. */
  1089. asmlinkage unsigned long sys_alarm(unsigned int seconds)
  1090. {
  1091. return alarm_setitimer(seconds);
  1092. }
  1093. #endif
  1094. #ifndef __alpha__
  1095. /*
  1096. * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
  1097. * should be moved into arch/i386 instead?
  1098. */
  1099. /**
  1100. * sys_getpid - return the thread group id of the current process
  1101. *
  1102. * Note, despite the name, this returns the tgid not the pid. The tgid and
  1103. * the pid are identical unless CLONE_THREAD was specified on clone() in
  1104. * which case the tgid is the same in all threads of the same group.
  1105. *
  1106. * This is SMP safe as current->tgid does not change.
  1107. */
  1108. asmlinkage long sys_getpid(void)
  1109. {
  1110. return current->tgid;
  1111. }
  1112. /*
  1113. * Accessing ->real_parent is not SMP-safe, it could
  1114. * change from under us. However, we can use a stale
  1115. * value of ->real_parent under rcu_read_lock(), see
  1116. * release_task()->call_rcu(delayed_put_task_struct).
  1117. */
  1118. asmlinkage long sys_getppid(void)
  1119. {
  1120. int pid;
  1121. rcu_read_lock();
  1122. pid = rcu_dereference(current->real_parent)->tgid;
  1123. rcu_read_unlock();
  1124. return pid;
  1125. }
  1126. asmlinkage long sys_getuid(void)
  1127. {
  1128. /* Only we change this so SMP safe */
  1129. return current->uid;
  1130. }
  1131. asmlinkage long sys_geteuid(void)
  1132. {
  1133. /* Only we change this so SMP safe */
  1134. return current->euid;
  1135. }
  1136. asmlinkage long sys_getgid(void)
  1137. {
  1138. /* Only we change this so SMP safe */
  1139. return current->gid;
  1140. }
  1141. asmlinkage long sys_getegid(void)
  1142. {
  1143. /* Only we change this so SMP safe */
  1144. return current->egid;
  1145. }
  1146. #endif
  1147. static void process_timeout(unsigned long __data)
  1148. {
  1149. wake_up_process((struct task_struct *)__data);
  1150. }
  1151. /**
  1152. * schedule_timeout - sleep until timeout
  1153. * @timeout: timeout value in jiffies
  1154. *
  1155. * Make the current task sleep until @timeout jiffies have
  1156. * elapsed. The routine will return immediately unless
  1157. * the current task state has been set (see set_current_state()).
  1158. *
  1159. * You can set the task state as follows -
  1160. *
  1161. * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
  1162. * pass before the routine returns. The routine will return 0
  1163. *
  1164. * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
  1165. * delivered to the current task. In this case the remaining time
  1166. * in jiffies will be returned, or 0 if the timer expired in time
  1167. *
  1168. * The current task state is guaranteed to be TASK_RUNNING when this
  1169. * routine returns.
  1170. *
  1171. * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
  1172. * the CPU away without a bound on the timeout. In this case the return
  1173. * value will be %MAX_SCHEDULE_TIMEOUT.
  1174. *
  1175. * In all cases the return value is guaranteed to be non-negative.
  1176. */
  1177. fastcall signed long __sched schedule_timeout(signed long timeout)
  1178. {
  1179. struct timer_list timer;
  1180. unsigned long expire;
  1181. switch (timeout)
  1182. {
  1183. case MAX_SCHEDULE_TIMEOUT:
  1184. /*
  1185. * These two special cases exist purely for the caller's
  1186. * convenience. Nothing more. We could have taken
  1187. * MAX_SCHEDULE_TIMEOUT from one of the negative values,
  1188. * but returning a valid offset (>= 0) lets the caller do
  1189. * whatever it wants with the return value.
  1190. */
  1191. schedule();
  1192. goto out;
  1193. default:
  1194. /*
  1195. * Another bit of paranoia. Note that the retval will be
  1196. * 0, since no part of the kernel is supposed to check
  1197. * for a negative retval of schedule_timeout() (it should
  1198. * never happen anyway). The printk() simply tells you
  1199. * that something has gone wrong, and where.
  1200. */
  1201. if (timeout < 0) {
  1202. printk(KERN_ERR "schedule_timeout: wrong timeout "
  1203. "value %lx\n", timeout);
  1204. dump_stack();
  1205. current->state = TASK_RUNNING;
  1206. goto out;
  1207. }
  1208. }
  1209. expire = timeout + jiffies;
  1210. setup_timer(&timer, process_timeout, (unsigned long)current);
  1211. __mod_timer(&timer, expire);
  1212. schedule();
  1213. del_singleshot_timer_sync(&timer);
  1214. timeout = expire - jiffies;
  1215. out:
  1216. return timeout < 0 ? 0 : timeout;
  1217. }
  1218. EXPORT_SYMBOL(schedule_timeout);
  1219. /*
  1220. * We can use __set_current_state() here because schedule_timeout() calls
  1221. * schedule() unconditionally.
  1222. */
  1223. signed long __sched schedule_timeout_interruptible(signed long timeout)
  1224. {
  1225. __set_current_state(TASK_INTERRUPTIBLE);
  1226. return schedule_timeout(timeout);
  1227. }
  1228. EXPORT_SYMBOL(schedule_timeout_interruptible);
  1229. signed long __sched schedule_timeout_uninterruptible(signed long timeout)
  1230. {
  1231. __set_current_state(TASK_UNINTERRUPTIBLE);
  1232. return schedule_timeout(timeout);
  1233. }
  1234. EXPORT_SYMBOL(schedule_timeout_uninterruptible);
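/*
 * Usage sketch (illustrative): to sleep for roughly 100 ms while still
 * reacting to signals, a caller would do:
 *
 *	signed long left;
 *
 *	left = schedule_timeout_interruptible(msecs_to_jiffies(100));
 *	if (left)
 *		;	// woken early by a signal, 'left' jiffies remained
 *
 * The helper sets TASK_INTERRUPTIBLE itself, so no separate
 * set_current_state() call is needed.
 */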
  1235. /* Thread ID - the internal kernel "pid" */
  1236. asmlinkage long sys_gettid(void)
  1237. {
  1238. return current->pid;
  1239. }
  1240. /**
  1241. * do_sysinfo - fill in sysinfo struct
  1242. * @info: pointer to buffer to fill
  1243. */
  1244. int do_sysinfo(struct sysinfo *info)
  1245. {
  1246. unsigned long mem_total, sav_total;
  1247. unsigned int mem_unit, bitcount;
  1248. unsigned long seq;
  1249. memset(info, 0, sizeof(struct sysinfo));
  1250. do {
  1251. struct timespec tp;
  1252. seq = read_seqbegin(&xtime_lock);
  1253. /*
  1254. * This is annoying. The below is the same thing
  1255. * posix_get_clock_monotonic() does, but it wants to
  1256. * take the lock which we want to cover the loads stuff
  1257. * too.
  1258. */
  1259. getnstimeofday(&tp);
  1260. tp.tv_sec += wall_to_monotonic.tv_sec;
  1261. tp.tv_nsec += wall_to_monotonic.tv_nsec;
  1262. if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
  1263. tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
  1264. tp.tv_sec++;
  1265. }
  1266. info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
  1267. info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
  1268. info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
  1269. info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
  1270. info->procs = nr_threads;
  1271. } while (read_seqretry(&xtime_lock, seq));
  1272. si_meminfo(info);
  1273. si_swapinfo(info);
  1274. /*
  1275. * If the sum of all the available memory (i.e. ram + swap)
  1276. * is less than can be stored in a 32 bit unsigned long then
  1277. * we can be binary compatible with 2.2.x kernels. If not,
  1278. * well, in that case 2.2.x was broken anyways...
  1279. *
  1280. * -Erik Andersen <andersee@debian.org>
  1281. */
  1282. mem_total = info->totalram + info->totalswap;
  1283. if (mem_total < info->totalram || mem_total < info->totalswap)
  1284. goto out;
  1285. bitcount = 0;
  1286. mem_unit = info->mem_unit;
  1287. while (mem_unit > 1) {
  1288. bitcount++;
  1289. mem_unit >>= 1;
  1290. sav_total = mem_total;
  1291. mem_total <<= 1;
  1292. if (mem_total < sav_total)
  1293. goto out;
  1294. }
  1295. /*
  1296. * If mem_total did not overflow, multiply all memory values by
  1297. * info->mem_unit and set it to 1. This leaves things compatible
  1298. * with 2.2.x, and also retains compatibility with earlier 2.4.x
  1299. * kernels...
  1300. */
  1301. info->mem_unit = 1;
  1302. info->totalram <<= bitcount;
  1303. info->freeram <<= bitcount;
  1304. info->sharedram <<= bitcount;
  1305. info->bufferram <<= bitcount;
  1306. info->totalswap <<= bitcount;
  1307. info->freeswap <<= bitcount;
  1308. info->totalhigh <<= bitcount;
  1309. info->freehigh <<= bitcount;
  1310. out:
  1311. return 0;
  1312. }
  1313. asmlinkage long sys_sysinfo(struct sysinfo __user *info)
  1314. {
  1315. struct sysinfo val;
  1316. do_sysinfo(&val);
  1317. if (copy_to_user(info, &val, sizeof(struct sysinfo)))
  1318. return -EFAULT;
  1319. return 0;
  1320. }
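/*
 * Interpreting the result (illustrative, from userspace): the loads[]
 * values returned by sysinfo(2) are fixed point with SI_LOAD_SHIFT (16)
 * fractional bits, so a 1-minute load average of 0.50 comes back as
 * roughly 32768:
 *
 *	struct sysinfo si;
 *	sysinfo(&si);
 *	printf("load1 %.2f\n", si.loads[0] / (double)(1 << SI_LOAD_SHIFT));
 */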
  1321. /*
  1322. * lockdep: we want to track each per-CPU base as a separate lock-class,
  1323. * but timer-bases are kmalloc()-ed, so we need to attach separate
  1324. * keys to them:
  1325. */
  1326. static struct lock_class_key base_lock_keys[NR_CPUS];
  1327. static int __devinit init_timers_cpu(int cpu)
  1328. {
  1329. int j;
  1330. tvec_base_t *base;
  1331. static char __devinitdata tvec_base_done[NR_CPUS];
  1332. if (!tvec_base_done[cpu]) {
  1333. static char boot_done;
  1334. if (boot_done) {
  1335. /*
  1336. * The APs use this path later in boot
  1337. */
  1338. base = kmalloc_node(sizeof(*base), GFP_KERNEL,
  1339. cpu_to_node(cpu));
  1340. if (!base)
  1341. return -ENOMEM;
  1342. memset(base, 0, sizeof(*base));
  1343. per_cpu(tvec_bases, cpu) = base;
  1344. } else {
  1345. /*
  1346. * This is for the boot CPU - we use compile-time
  1347. * static initialisation because per-cpu memory isn't
  1348. * ready yet and because the memory allocators are not
  1349. * initialised either.
  1350. */
  1351. boot_done = 1;
  1352. base = &boot_tvec_bases;
  1353. }
  1354. tvec_base_done[cpu] = 1;
  1355. } else {
  1356. base = per_cpu(tvec_bases, cpu);
  1357. }
  1358. spin_lock_init(&base->lock);
  1359. lockdep_set_class(&base->lock, base_lock_keys + cpu);
  1360. for (j = 0; j < TVN_SIZE; j++) {
  1361. INIT_LIST_HEAD(base->tv5.vec + j);
  1362. INIT_LIST_HEAD(base->tv4.vec + j);
  1363. INIT_LIST_HEAD(base->tv3.vec + j);
  1364. INIT_LIST_HEAD(base->tv2.vec + j);
  1365. }
  1366. for (j = 0; j < TVR_SIZE; j++)
  1367. INIT_LIST_HEAD(base->tv1.vec + j);
  1368. base->timer_jiffies = jiffies;
  1369. return 0;
  1370. }
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		detach_timer(timer, 0);
		timer->base = new_base;
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
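	/*
	 * Take both base locks with irqs off: the splice below must not
	 * race with the timer softirq on this CPU, nor with other CPUs
	 * that may still reach old_base through timers queued on the
	 * now-offline CPU (e.g. via mod_timer()/del_timer()).
	 */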
	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock(&old_base->lock);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}
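/*
 * Hotplug lifecycle, as wired up above: CPU_UP_PREPARE runs
 * init_timers_cpu() before the incoming CPU executes anything, so its
 * tvec base exists by the time it can arm a timer; CPU_DEAD runs
 * migrate_timers() after the CPU is gone, splicing any timers it left
 * behind onto the notified (current) CPU's base.
 */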
static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};

void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline cycles_t time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src)
	{
	case TIME_SOURCE_FUNCTION:
		x = time_interpolator->addr;
		return x();

	case TIME_SOURCE_MMIO64:
		return readq_relaxed((void __iomem *)time_interpolator->addr);

	case TIME_SOURCE_MMIO32:
		return readl_relaxed((void __iomem *)time_interpolator->addr);

	default:
		return get_cycles();
	}
}
static inline u64 time_interpolator_get_counter(int writelock)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter)
	{
		cycles_t lcycle;
		cycles_t now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;

			/* When holding the xtime write lock, there's no need
			 * to add the overhead of the cmpxchg.  Readers are
			 * forced to retry until the write lock is released.
			 */
			if (writelock) {
				time_interpolator->last_cycle = now;
				return now;
			}
			/* Keep track of the last timer value returned.
			 * The use of cmpxchg here will cause contention
			 * in an SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	}
	else
		return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count, i) (((((count) - (i)->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
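/*
 * GET_TI_NSECS converts a raw counter delta to nanoseconds in fixed
 * point: the masked delta is multiplied by nsec_per_cyc, which was
 * pre-scaled by 2^shift in register_time_interpolator(), and the
 * product is shifted back down.  Illustrative numbers (not from the
 * source): a 10 MHz counter with shift == 16 gives nsec_per_cyc ==
 * (10^9 << 16) / 10^7 == 100 << 16, so a delta of 3 cycles yields
 * (3 * (100 << 16)) >> 16 == 300 ns.
 */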
unsigned long time_interpolator_get_offset(void)
{
	/* If we do not have a time interpolator set up then just return zero */
	if (!time_interpolator)
		return 0;

	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}

#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP (10*INTERPOLATOR_ADJUST)
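/*
 * Sanity check on the constant: the tuning block below runs when
 * jiffies is a multiple of INTERPOLATOR_ADJUST, i.e. every 65536
 * ticks.  At HZ == 1000 that is roughly every 65 seconds, matching
 * the "every minute or so" in the comment below.
 */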
void time_interpolator_update(long delta_nsec)
{
	u64 counter;
	unsigned long offset;

	/* If there is no time interpolator set up then do nothing */
	if (!time_interpolator)
		return;

	/*
	 * The interpolator compensates for late ticks by accumulating the late
	 * time in time_interpolator->offset.  A tick earlier than expected will
	 * lead to a reset of the offset and a corresponding jump of the clock
	 * forward.  Again this only works if the interpolator clock is running
	 * slightly slower than the regular clock and the tuning logic ensures
	 * that.
	 */
	counter = time_interpolator_get_counter(1);
	offset = time_interpolator->offset +
			GET_TI_NSECS(counter, time_interpolator);

	if (delta_nsec < 0 || (unsigned long)delta_nsec < offset)
		time_interpolator->offset = offset - delta_nsec;
	else {
		time_interpolator->skips++;
		time_interpolator->ns_skipped += delta_nsec - offset;
		time_interpolator->offset = 0;
	}
	time_interpolator->last_counter = counter;

	/* Tuning logic for time interpolator invoked every minute or so.
	 * Decrease interpolator clock speed if no skips occurred and an
	 * offset is carried.
	 * Increase interpolator clock speed if we skip too much time.
	 */
	if (jiffies % INTERPOLATOR_ADJUST == 0)
	{
		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
			time_interpolator->nsec_per_cyc--;
		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
			time_interpolator->nsec_per_cyc++;
		time_interpolator->skips = 0;
		time_interpolator->ns_skipped = 0;
	}
}
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2*time_interpolator->frequency ||
	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	/* Sanity check */
	BUG_ON(ti->frequency == 0 || ti->mask == 0);

	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
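	/*
	 * nsec_per_cyc is the per-cycle duration in 2^shift fixed point;
	 * for example (illustrative, not from the source), a 1 GHz
	 * counter gives nsec_per_cyc == 1 << shift, i.e. exactly one
	 * nanosecond per cycle once GET_TI_NSECS() shifts back down.
	 */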
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}
void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);
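/*
 * The "+ 1" above is deliberate: msecs_to_jiffies() rounds up, but
 * the current tick may already be partly elapsed, so one extra jiffy
 * is needed to guarantee a sleep of at least @msecs.  Hedged caller
 * sketch (any sleepable context):
 *
 *	msleep(20);	/* blocks for >= 20 ms, ignoring signals */
 */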
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
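/*
 * Unlike msleep(), this returns early when a signal is pending and
 * reports the time left to sleep, converted back to milliseconds.
 * Hedged caller sketch:
 *
 *	unsigned long left = msleep_interruptible(100);
 *	if (left)
 *		return -EINTR;	/* interrupted with ~left ms to go */
 */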