#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"
/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");

	return 1;
}
__setup("nompx", x86_mpx_setup);

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);
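
/*
 * Note the differing return conventions above: a __setup() handler
 * returns 1 to say it consumed the option, while an early_param()
 * handler returns 0 on success and a negative errno on failure.
 */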
#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
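	/*
	 * Save EFLAGS, flip the requested bit, write the result back and
	 * re-read EFLAGS: if the bit stuck, the flag is changeable.  For
	 * X86_EFLAGS_ID (bit 21) this is the architectural way to probe
	 * for CPUID support.
	 */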
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"
		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
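	/*
	 * Setting bit 21 (0x200000) of BBL_CR_CTL is what actually turns
	 * the serial number off on the (Pentium III era) parts that have it.
	 */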
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif
static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}
/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set.  We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	get_cpu_cap(c);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}
/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}
__u32 cpu_caps_cleared[NCAPINTS];
__u32 cpu_caps_set[NCAPINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}
static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
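	/*
	 * p scans the original string, q writes the shifted copy in
	 * place, and s remembers where the last non-space character was
	 * written, so trailing blanks can be chopped off afterwards.
	 */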
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}
void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}
u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}
void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);
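	/* EBX[23:16] of CPUID leaf 1 is the logical processor count per package. */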
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		pr_info("CPU: Physical Processor ID: %d\n",
			c->phys_proc_id);
		pr_info("CPU: Processor Core ID: %d\n",
			c->cpu_core_id);
		printed = 1;
	}
#endif
}
static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}
void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
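	/*
	 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order,
	 * which is why the output pointers below fill bytes 0, 8 and 4
	 * of x86_vendor_id rather than 0, 4 and 8.
	 */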
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_mask	= x86_stepping(tfms);
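		/*
		 * EDX bit 19 advertises CLFLUSH; EBX[15:8] encodes its
		 * line size in units of 8 bytes.
		 */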
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}
void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_7_0_EBX] = ebx;

		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
		c->x86_capability[CPUID_7_ECX] = ecx;
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_F_0_EDX] = edx;

		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_F_1_EDX] = edx;

			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
			      ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
			       (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;
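	/*
	 * CPUs without extended CPUID may echo junk in EAX for leaf
	 * 0x80000000; insisting on the 0x8000xxxx range below guards
	 * against trusting bogus extended leaves.
	 */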
	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
}
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}
/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);
	get_cpu_vendor(c);
	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	c->cpu_index = 0;
	filter_cpuid_features(c, false);

	if (this_cpu->c_bsp_init)
		this_cpu->c_bsp_init(c);

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
	fpu__init_system(c);
}
void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}
/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_PARAVIRT
	do {
		extern void native_iret(void);
		if (pv_cpu_ops.iret == native_iret)
			set_cpu_bug(c, X86_BUG_ESPFIX);
	} while (0);
#else
	set_cpu_bug(c, X86_BUG_ESPFIX);
#endif
#endif
}
static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_nopl(c);
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
	/*
	 * The heavy lifting of max_rmid and cache_occ_scale are handled
	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
	 * in case CQM bits really aren't there in this CPU.
	 */
	if (c != &boot_cpu_data) {
		boot_cpu_data.x86_cache_max_rmid =
			min(boot_cpu_data.x86_cache_max_rmid,
			    c->x86_cache_max_rmid);
	}
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP */
	setup_smep(c);
	setup_smap(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;

		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);
	x86_init_rdrand(c);
	x86_init_cache_qos(c);
	setup_pku(c);

	/*
	 * Clear/Set all flags overridden by options; this needs to
	 * happen before the SMP all-CPUs capability AND below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
	/* The boot/hotplug time assignment got cleared, restore it */
	c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
}
/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP))
		goto out;

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */
	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

	wrmsr(MSR_IA32_SYSENTER_ESP,
	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
	      0);

	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

out:
	put_cpu();
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}
void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

static const struct msr_range msr_range_array[] = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};
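
/*
 * These are the MSR windows dumped by the show_msr= option below;
 * rdmsrl_safe() quietly skips any index the running CPU does not
 * implement, so probing the whole range is harmless.
 */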
static void __print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_safe(index, &val))
				continue;
			pr_info(" MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		pr_cont("%s ", vendor);

	if (c->x86_model_id[0])
		pr_cont("%s", c->x86_model_id);
	else
		pr_cont("%d86", c->x86);

	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_mask || c->cpuid_level >= 0)
		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
	else
		pr_cont(")\n");

	print_cpu_msr(c);
}
void print_cpu_msr(struct cpuinfo_x86 *c)
{
	if (c->cpu_index < show_msr)
		__print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
				    (unsigned long) debug_idt_table };
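/* 64-bit IDT gates are 16 bytes each, hence NR_VECTORS * 16 - 1 as the limit. */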
DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;

/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register.  STAR allows
	 * setting CS/DS, but only a 32-bit target.  LSTAR sets the 64-bit rip.
	 */
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, and the CPU truncates
	 * MSR_IA32_SYSENTER_EIP.
	 * This does not cause SYSENTER to jump to the wrong location, because
	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}
/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

int is_debug_stack(unsigned long addr)
{
	return __this_cpu_read(debug_stack_usage) ||
		(addr <= __this_cpu_read(debug_stack_addr) &&
		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack.  Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */
/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * wait for ACK from master CPU before continuing
	 * with AP initialization
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu = stack_smp_processor_id();
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	/*
	 * Load microcode on this cpu if a valid microcode is available.
	 * This is early microcode loading procedure.
	 */
	load_ucode_ap();

	t = &per_cpu(cpu_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);
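		/*
		 * estacks is advanced before it is recorded, so each IST
		 * slot ends up holding the top (highest address) of its
		 * stack, which is what the hardware expects.
		 */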
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	if (is_uv_system())
		uv_cpu_init();
}
#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	show_ucode_info_early();

	pr_info("Initializing CPU#%d\n", cpu);

	if (cpu_feature_enabled(X86_FEATURE_VME) ||
	    cpu_has_tsc ||
	    boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();
}
#endif

static void bsp_resume(void)
{
	if (this_cpu->c_bsp_resume)
		this_cpu->c_bsp_resume(&boot_cpu_data);
}

static struct syscore_ops cpu_syscore_ops = {
	.resume		= bsp_resume,
};

static int __init init_cpu_syscore(void)
{
	register_syscore_ops(&cpu_syscore_ops);
	return 0;
}
core_initcall(init_cpu_syscore);