smpboot.c

/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *	Felix Koop		: NR_CPUS used properly
 *	Jose Renau		: Handle single CPU case.
 *	Alan Cox		: By repeated request 8) - Total BogoMIPS report.
 *	Greg Wright		: Fix for kernel stacks panic.
 *	Erich Boleyn		: MP v1.4 and additional changes.
 *	Matthias Sattler	: Changes for 2.1 kernel map.
 *	Michel Lespinasse	: Changes for 2.1 kernel map.
 *	Michael Chastain	: Change trampoline.S to gnu as.
 *	Alan Cox		: Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar		: Added APIC timers, based on code
 *				  from Jose Renau
 *	Ingo Molnar		: various cleanups and rewrites
 *	Tigran Aivazian		: fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	: Bits for genuine 82489DX APICs
 *	Andi Kleen		: Changed for SMP boot into long mode.
 *	Martin J. Bligh		: Added support for multi-quad systems
 *	Dave Jones		: Report invalid combinations of Athlon CPUs.
 *	Rusty Russell		: Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen		: Converted to new state machine.
 *	Ashok Raj		: CPU hotplug support
 *	Glauber Costa		: i386 and x86_64 integration
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
#include <asm/misc.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Logical package management. We might want to allocate that dynamically */
static int *physical_to_logical_pkg __read_mostly;
static unsigned long *physical_package_map __read_mostly;
static unsigned int max_physical_pkg_id __read_mostly;
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
static bool logical_packages_frozen __read_mostly;

/* Maximum number of SMT threads on any online core */
int __max_smt_threads __read_mostly;
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0xa, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);
	local_flush_tlb();
	pr_debug("1.\n");
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
							start_eip >> 4;
	pr_debug("2.\n");
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
							start_eip & 0xf;
	pr_debug("3.\n");
}
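
/*
 * Editor's note: a minimal standalone sketch (guarded out below, not part
 * of this file's build) of how the warm-reset vector code above splits a
 * physical start address into the real-mode segment:offset pair stored at
 * TRAMPOLINE_PHYS_HIGH/LOW. The address value is a made-up example.
 */
#if 0	/* illustrative userspace demo; compile separately */
#include <stdio.h>

int main(void)
{
	unsigned long start_eip = 0x9a000;	/* hypothetical trampoline address */

	/* High word gets the segment (address >> 4), low word the offset. */
	unsigned short segment = start_eip >> 4;
	unsigned short offset  = start_eip & 0xf;

	printf("CS:IP = %04x:%04x\n", segment, offset);
	return 0;
}
#endif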
static inline void smpboot_restore_warm_reset_vector(void)
{
	unsigned long flags;

	/*
	 * Install writable page 0 entry to set BIOS data area.
	 */
	local_flush_tlb();

	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);

	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}
/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void smp_callin(void)
{
	int cpuid, phys_id;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * cpu_callout_mask guarantees we don't get here before
	 * an INIT_deassert IPI reaches our local APIC, so it is
	 * now safe to touch our local APIC.
	 */
	cpuid = smp_processor_id();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	apic_ap_setup();

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
static int cpu0_logical_apicid;
static int enable_start_cpu0;

/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is fragile
	 * enough that we want to limit the things done here to the most
	 * necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * Lock vector_lock and initialize the vectors on this cpu
	 * before setting the cpu online. We must set it online with
	 * vector_lock held to prevent a concurrent setup/teardown
	 * from seeing a half valid vector space.
	 */
	lock_vector_lock();
	setup_vector_irq(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
int topology_update_package_map(unsigned int apicid, unsigned int cpu)
{
	unsigned int new, pkg = apicid >> boot_cpu_data.x86_coreid_bits;

	/* Called from early boot ? */
	if (!physical_package_map)
		return 0;

	if (pkg >= max_physical_pkg_id)
		return -EINVAL;

	/* Set the logical package id */
	if (test_and_set_bit(pkg, physical_package_map))
		goto found;

	if (logical_packages_frozen) {
		physical_to_logical_pkg[pkg] = -1;
		pr_warn("APIC(%x) Package %u exceeds logical package max\n",
			apicid, pkg);
		return -ENOSPC;
	}

	new = logical_packages++;
	pr_info("APIC(%x) Converting physical %u to logical package %u\n",
		apicid, pkg, new);
	physical_to_logical_pkg[pkg] = new;

found:
	cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
	return 0;
}
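
/*
 * Editor's note: a standalone sketch (guarded out, compile separately) of
 * the physical-to-logical package mapping above: the first time a physical
 * package id is seen, it gets the next free logical id; repeats reuse the
 * existing mapping. The APIC ids and the 4-bit core-id shift are made-up
 * example values.
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>

#define MAX_PKG 16

int main(void)
{
	/* hypothetical APIC ids: two packages, two cores each */
	unsigned int apicids[] = { 0x00, 0x01, 0x10, 0x11 };
	unsigned int coreid_bits = 4;		/* assumed x86_coreid_bits */
	int phys_to_logical[MAX_PKG];
	unsigned long seen = 0;			/* physical_package_map analogue */
	unsigned int logical = 0, i;

	for (i = 0; i < MAX_PKG; i++)
		phys_to_logical[i] = -1;

	for (i = 0; i < sizeof(apicids) / sizeof(apicids[0]); i++) {
		unsigned int pkg = apicids[i] >> coreid_bits;

		if (!(seen & (1UL << pkg))) {	/* test_and_set_bit analogue */
			seen |= 1UL << pkg;
			phys_to_logical[pkg] = logical++;
		}
		printf("APIC %#x: physical pkg %u -> logical pkg %d\n",
		       apicids[i], pkg, phys_to_logical[pkg]);
	}
	return 0;
}
#endif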
/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
 *
 * Returns logical package id or -1 if not found
 */
int topology_phys_to_logical_pkg(unsigned int phys_pkg)
{
	if (phys_pkg >= max_physical_pkg_id)
		return -1;
	return physical_to_logical_pkg[phys_pkg];
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);
static void __init smp_init_package_map(void)
{
	unsigned int ncpus, cpu;
	size_t size;

	/*
	 * Today neither Intel nor AMD support heterogeneous systems. That
	 * might change in the future....
	 *
	 * While ideally we'd want '* smp_num_siblings' in the below @ncpus
	 * computation, this won't actually work since some Intel BIOSes
	 * report inconsistent HT data when they disable HT.
	 *
	 * In particular, they reduce the APIC-IDs to only include the cores,
	 * but leave the CPUID topology to say there are (2) siblings.
	 * This means we don't know how many threads there will be until
	 * after the APIC enumeration.
	 *
	 * By not including this we'll sometimes over-estimate the number of
	 * logical packages by the amount of !present siblings, but this is
	 * still better than MAX_LOCAL_APIC.
	 *
	 * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited
	 * on the command line leading to a similar issue as the HT disable
	 * problem because the hyperthreads are usually enumerated after the
	 * primary cores.
	 */
	ncpus = boot_cpu_data.x86_max_cores;
	if (!ncpus) {
		pr_warn("x86_max_cores == zero !?!?");
		ncpus = 1;
	}

	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
	logical_packages = 0;

	/*
	 * Possibly larger than what we need as the number of apic ids per
	 * package can be smaller than the actual used apic ids.
	 */
	max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
	size = max_physical_pkg_id * sizeof(unsigned int);
	physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
	memset(physical_to_logical_pkg, 0xff, size);
	size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
	physical_package_map = kzalloc(size, GFP_KERNEL);

	for_each_present_cpu(cpu) {
		unsigned int apicid = apic->cpu_present_to_apicid(cpu);

		if (apicid == BAD_APICID || !apic->apic_id_valid(apicid))
			continue;
		if (!topology_update_package_map(apicid, cpu))
			continue;
		pr_warn("CPU %u APICId %x disabled\n", cpu, apicid);
		per_cpu(x86_bios_cpu_apicid, cpu) = BAD_APICID;
		set_cpu_possible(cpu, false);
		set_cpu_present(cpu, false);
	}

	if (logical_packages > __max_logical_packages) {
		pr_warn("Detected more packages (%u) than computed by BIOS data (%u).\n",
			logical_packages, __max_logical_packages);
		logical_packages_frozen = true;
		__max_logical_packages = logical_packages;
	}

	pr_info("Max logical packages: %u\n", __max_logical_packages);
}
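
/*
 * Editor's note: a standalone sketch (guarded out) of the two DIV_ROUND_UP
 * sizing computations above, with made-up example numbers. DIV_ROUND_UP(n, d)
 * is the kernel's ceiling division, ((n) + (d) - 1) / (d).
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int total_cpus = 6, ncpus = 4;		/* hypothetical values */
	unsigned int max_local_apic = 32768;		/* MAX_LOCAL_APIC analogue */

	/* 6 CPUs at up to 4 cores per package still need 2 package slots */
	printf("__max_logical_packages = %u\n",
	       DIV_ROUND_UP(total_cpus, ncpus));
	printf("max_physical_pkg_id   = %u\n",
	       DIV_ROUND_UP(max_local_apic, ncpus));
	return 0;
}
#endif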
void __init smp_store_boot_cpu_info(void)
{
	int id = 0; /* CPU 0 */
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	smp_init_package_map();
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	/*
	 * During boot time, CPU0 has this setup already. Save the info when
	 * bringing up AP or offlined CPU0.
	 */
	identify_secondary_cpu(c);
}
static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(!topology_same_node(c, o),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(mfunc, c1, c2)					\
do {									\
	cpumask_set_cpu((c1), mfunc(c2));				\
	cpumask_set_cpu((c2), mfunc(c1));				\
} while (0)

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
		    c->cpu_core_id == o->cpu_core_id)
			return topology_sane(c, o, "smt");

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
		return topology_sane(c, o, "llc");

	return false;
}

/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node. If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return true;
	return false;
}
static struct sched_domain_topology_level numa_inside_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ NULL, },
};

/*
 * set_sched_topology() sets the topology internal to a CPU. The
 * NUMA topologies are layered on top of it to build the full
 * system topology.
 *
 * If NUMA nodes are observed to occur within a CPU package, this
 * function should be called. It forces the sched domain code to
 * only use the SMT level for the CPU portion of the topology.
 * This essentially falls back to relying on NUMA information
 * from the SRAT table to describe the entire system topology
 * (except for hyperthreads).
 */
static void primarily_use_numa_for_topology(void)
{
	set_sched_topology(numa_inside_package_topology);
}
void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i, threads;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(topology_sibling_cpumask, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(cpu_llc_shared_mask, cpu, i);
	}

	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * topology_sibling_cpumask links to be set-up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_die(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		if (match_die(c, o) && !topology_same_node(c, o))
			primarily_use_numa_for_topology();
	}

	threads = cpumask_weight(topology_sibling_cpumask(cpu));
	if (threads > __max_smt_threads)
		__max_smt_threads = threads;
}
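
/*
 * Editor's note: a standalone sketch (guarded out) of the symmetric
 * sibling-mask linking that link_mask() performs above, using plain
 * unsigned longs in place of struct cpumask. The CPU pairing is a made-up
 * example.
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>

#define NCPUS 4

static unsigned long sibling_mask[NCPUS];

/* link_mask() analogue: mark c1 and c2 as siblings of each other */
static void link_siblings(int c1, int c2)
{
	sibling_mask[c1] |= 1UL << c2;
	sibling_mask[c2] |= 1UL << c1;
}

int main(void)
{
	int cpu;

	link_siblings(0, 0);	/* every CPU is its own sibling */
	link_siblings(1, 1);
	link_siblings(0, 1);	/* hypothetical HT pair 0/1 */

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d sibling mask: %#lx\n", cpu, sibling_mask[cpu]);
	return 0;
}
#endif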
/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}
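
/*
 * Editor's note: a standalone sketch (guarded out) of the fixed-point
 * BogoMIPS formatting used above: bogosum/(500000/HZ) yields the integer
 * part and (bogosum/(5000/HZ))%100 the two decimal places, avoiding
 * floating point. HZ and the loops_per_jiffy sum are made-up examples.
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>

#define HZ 250	/* hypothetical kernel tick rate */

int main(void)
{
	unsigned long bogosum = 4UL * 2394456;	/* four CPUs' loops_per_jiffy */

	printf("Total of 4 processors activated (%lu.%02lu BogoMIPS)\n",
	       bogosum / (500000 / HZ),
	       (bogosum / (5000 / HZ)) % 100);	/* prints 4788.91 */
	return 0;
}
#endif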
void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}
/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to override this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UINT_MAX;

static int __init cpu_init_udelay(char *str)
{
	get_option(&str, &init_udelay);

	return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{
	/* if cmdline changed it from default, leave it alone */
	if (init_udelay != UINT_MAX)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
		init_udelay = 0;
		return;
	}
	/* else, use legacy delay */
	init_udelay = UDELAY_10MS_DEFAULT;
}
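
/*
 * Editor's note: a standalone sketch (guarded out) of the delay selection
 * above: an explicit command-line value wins, modern vendor/family pairs
 * get no delay, everything else keeps the MP-spec 10ms default. The vendor
 * codes here are simplified stand-ins for the kernel's X86_VENDOR_* values.
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>
#include <limits.h>

enum { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

static unsigned int pick_init_udelay(unsigned int cmdline, int vendor,
				     unsigned int family)
{
	if (cmdline != UINT_MAX)		/* explicit cpu_init_udelay= */
		return cmdline;
	if ((vendor == VENDOR_INTEL && family == 6) ||
	    (vendor == VENDOR_AMD && family >= 0xF))
		return 0;			/* modern CPU: no delay */
	return 10000;				/* UDELAY_10MS_DEFAULT */
}

int main(void)
{
	printf("%u\n", pick_init_udelay(UINT_MAX, VENDOR_INTEL, 6));	/* 0 */
	printf("%u\n", pick_init_udelay(UINT_MAX, VENDOR_OTHER, 5));	/* 10000 */
	printf("%u\n", pick_init_udelay(300, VENDOR_INTEL, 6));		/* 300 */
	return 0;
}
#endif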
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}
static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	udelay(init_udelay);

	pr_debug("Deasserting INIT\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(200);

		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}
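
/*
 * Editor's note: a standalone sketch (guarded out) of the STARTUP IPI
 * vector encoding used above: the AP begins executing at vector * 0x1000,
 * so the trampoline's physical address is passed as (start_eip >> 12) and
 * must be 4KB-aligned and below 1MB. The address is a made-up example.
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>

int main(void)
{
	unsigned long start_eip = 0x9a000;	/* hypothetical trampoline */
	unsigned char vector = start_eip >> 12;	/* 8-bit STARTUP vector */

	printf("STARTUP vector %#x -> AP starts at %#lx\n",
	       vector, (unsigned long)vector << 12);
	return 0;
}
#endif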
void smp_announce(void)
{
	int num_nodes = num_online_nodes();

	printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
	       num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
}

/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
	static int current_node = -1;
	int node = early_cpu_to_node(cpu);
	static int width, node_width;

	if (!width)
		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */

	if (!node_width)
		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */

	if (cpu == 1)
		printk(KERN_INFO "x86: Booting SMP configuration:\n");

	if (system_state == SYSTEM_BOOTING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont("\n");
			current_node = node;

			printk(KERN_INFO ".... node %*s#%d, CPUs: ",
			       node_width - num_digits(node), " ", node);
		}

		/* Add padding for the BSP */
		if (cpu == 1)
			pr_cont("%*s", width + 1, " ");

		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);

	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}
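
/*
 * Editor's note: a standalone sketch (guarded out) of the "%*s" column
 * padding used by announce_cpu() above: the field width is derived from
 * the digit count of the largest possible CPU number so per-CPU entries
 * line up. The CPU counts are made-up examples.
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>

static int num_digits(int n)
{
	int d = 1;

	while (n >= 10) {
		n /= 10;
		d++;
	}
	return d;
}

int main(void)
{
	int possible = 128;			/* hypothetical CPU count */
	int width = num_digits(possible) + 1;	/* + '#' sign */
	int cpu;

	for (cpu = 1; cpu <= 4; cpu++)
		printf("%*s#%d", width - num_digits(cpu), " ", cpu);
	printf("\n");	/* prints:   #1   #2   #3   #4 */
	return 0;
}
#endif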
static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 *
 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
 * boot-strap code which is not a desired behavior for waking up BSP. To
 * avoid the boot-strap code, wake up CPU0 by NMI instead.
 *
 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 * We'll change this code in the future to wake up hard offlined CPU0 if
 * real platform and request are available.
 */
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
			int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	preempt_disable();

	/*
	 * Wake up AP by INIT, INIT, STARTUP sequence.
	 */
	if (cpu) {
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
		goto out;
	}

	/*
	 * Wake up BSP by nmi.
	 *
	 * Register a NMI handler to help wake up CPU0.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		if (apic->dest_logical == APIC_DEST_LOGICAL)
			id = cpu0_logical_apicid;
		else
			id = apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

out:
	preempt_enable();

	return boot_error;
}
void common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	per_cpu(current_task, cpu) = idle;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
	per_cpu(cpu_current_top_of_stack, cpu) =
		(unsigned long)task_stack_page(idle) + THREAD_SIZE;
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	initial_gs = per_cpu_offset(cpu);
#endif
}
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	int cpu0_nmi_registered = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long) (((struct pt_regs *)
		(THREAD_SIZE + task_stack_page(idle))) - 1);

	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	initial_code = (unsigned long)start_secondary;
	stack_start = idle->thread.sp;

	/*
	 * Enable the espfix hack for this CPU
	 */
#ifdef CONFIG_X86_ESPFIX64
	init_espfix_ap(cpu);
#endif

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if previous attempt to online
	 * it timed-out. Clear cpu_initialized_mask so that after
	 * INIT/SIPI it could start with a clean state.
	 */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	smp_mb();

	/*
	 * Wake up a CPU in different cases:
	 * - Use the method in the APIC driver if it's defined
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs or NMI for BSP.
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     &cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Wait 10s total for first sign of life from AP
		 */
		boot_error = -1;
		timeout = jiffies + 10*HZ;
		while (time_before(jiffies, timeout)) {
			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
				/*
				 * Tell AP to proceed with initialization
				 */
				cpumask_set_cpu(cpu, cpu_callout_mask);
				boot_error = 0;
				break;
			}
			schedule();
		}
	}

	if (!boot_error) {
		/*
		 * Wait till AP completes initial initialization
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work(triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			schedule();
		}
	}

	/* mark "stuck" area as not stuck */
	*trampoline_status = 0;

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}
	/*
	 * Clean up the nmi handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return boot_error;
}
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	/* x86 CPUs take themselves offline, so delayed offline is OK. */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	/* the FPU context is blank, nobody can own it */
	__cpu_disable_lazy_restore(cpu);

	common_cpu_up(cpu, tidle);

	/*
	 * We have to walk the irq descriptors to setup the vector
	 * space for the cpu which comes online. Prevent irq
	 * alloc/free across the bringup.
	 */
	irq_lock_sparse();

	err = do_boot_cpu(apicid, cpu, tidle);

	if (err) {
		irq_unlock_sparse();
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		return -EIO;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	irq_unlock_sparse();

	return 0;
}
/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	pr_info("SMP disabled\n");

	disable_ioapic_support();

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, topology_sibling_cpumask(0));
	cpumask_set_cpu(0, topology_core_cpumask(0));
}

enum {
	SMP_OK,
	SMP_NO_CONFIG,
	SMP_NO_APIC,
	SMP_FORCE_UP,
};
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		preempt_enable();
		pr_notice("SMP motherboard not detected\n");
		return SMP_NO_CONFIG;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !boot_cpu_has(X86_FEATURE_APIC)) {
		if (!disable_apic) {
			pr_err("BIOS bug, local APIC #%d not detected!...\n",
			       boot_cpu_physical_apicid);
			pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
		}
		return SMP_NO_APIC;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		pr_info("SMP mode deactivated\n");
		return SMP_FORCE_UP;
	}

	return SMP_OK;
}
static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info(); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	switch (smp_sanity_check(max_cpus)) {
	case SMP_NO_CONFIG:
		disable_smp();
		if (APIC_init_uniprocessor())
			pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
		return;
	case SMP_NO_APIC:
		disable_smp();
		return;
	case SMP_FORCE_UP:
		disable_smp();
		apic_bsp_setup(false);
		return;
	case SMP_OK:
		break;
	}

	default_setup_apic_routing();

	if (read_apic_id() != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      read_apic_id(), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}

	cpu0_logical_apicid = apic_bsp_setup(false);

	pr_info("CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));

	if (is_uv_system())
		uv_system_init();

	set_mtrr_aps_delayed_init();

	smp_quirk_init_udelay();
}
void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();

	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	cpu_set_state_online(me);
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	nmi_selftest();
	impress_friends();
	setup_ioapic_dest();
	mtrr_aps_init();
}
static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

/*
 * cpu_possible_mask should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, which don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * If CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* no processor from mptable or madt */
	if (!num_processors)
		num_processors = 1;

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
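
/*
 * Editor's note: a standalone sketch (guarded out) of the "possible CPUs"
 * arithmetic above, with hotplug assumed enabled: start from the
 * enumerated processors, add BIOS-disabled (hot-addable) ones, then clamp
 * to the nr_cpus= limit. All counts are made-up examples.
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>

int main(void)
{
	int num_processors = 4;		/* enumerated in MADT/mptable */
	int disabled_cpus = 2;		/* BIOS-disabled, hot-addable */
	int nr_cpu_ids = 8;		/* NR_CPUS / nr_cpus= clamp */
	int possible = num_processors + disabled_cpus;

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	printf("Allowing %d CPUs, %d hotplug CPUs\n",
	       possible, possible - num_processors);
	return 0;
}
#endif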
#ifdef CONFIG_HOTPLUG_CPU

/* Recompute SMT state for all CPUs on offline */
static void recompute_smt_state(void)
{
	int max_threads, cpu;

	max_threads = 0;
	for_each_online_cpu (cpu) {
		int threads = cpumask_weight(topology_sibling_cpumask(cpu));

		if (threads > max_threads)
			max_threads = threads;
	}
	__max_smt_threads = max_threads;
}

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(topology_sibling_cpumask(cpu));
	cpumask_clear(topology_core_cpumask(cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
	recompute_smt_state();
}
static void remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
}

int native_cpu_disable(void)
{
	int ret;

	ret = check_irq_vectors_for_cpu_disable();
	if (ret)
		return ret;

	clear_local_APIC();
	cpu_disable_common();

	return 0;
}

int common_cpu_die(unsigned int cpu)
{
	int ret = 0;

	/* We don't do anything here: idle task is faking death itself. */

	/* They ack this in play_dead() by setting CPU_DEAD */
	if (cpu_wait_death(cpu, 5)) {
		if (system_state == SYSTEM_RUNNING)
			pr_info("CPU %u is now offline\n", cpu);
	} else {
		pr_err("CPU %u didn't die...\n", cpu);
		ret = -1;
	}

	return ret;
}

void native_cpu_die(unsigned int cpu)
{
	common_cpu_die(cpu);
}

void play_dead_common(void)
{
	idle_task_exit();
	reset_lazy_tlbstate();
	amd_e400_remove_cpu(raw_smp_processor_id());

	/* Ack it */
	(void)cpu_report_death();

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}
static bool wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		return true;

	return false;
}

/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * Initialized below to cstate, sub_cstate value when EDX is valid.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors. The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series. It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}
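
/*
 * Editor's note: a standalone sketch (guarded out) of the MWAIT hint
 * encoding computed above: CPUID leaf 5's EDX packs a 4-bit sub-state
 * count per C-state, and the hint placed in EAX is
 * (cstate << 4) | (substate - 1). The EDX value is a made-up example.
 */
#if 0	/* illustrative userspace demo */
#include <stdio.h>

#define MWAIT_SUBSTATE_SIZE	4
#define MWAIT_SUBSTATE_MASK	0xf

int main(void)
{
	unsigned int edx = 0x00002220;	/* hypothetical: 2 substates each */
	unsigned int highest_cstate = 0, highest_subcstate = 0;
	unsigned int eax, i;

	edx >>= MWAIT_SUBSTATE_SIZE;	/* skip the first field */
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | (highest_subcstate - 1);
	printf("cstate index %u, substate %u -> MWAIT hint %#x\n",
	       highest_cstate, highest_subcstate, eax);
	return 0;
}
#endif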
void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on failure */
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif