processor.h
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}
/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */
struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	__u8			cu_id;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};
struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern u32 get_scattered_cpuid_leaf(unsigned int level,
				    unsigned int sub_leaf,
				    enum cpuid_regs_idx reg);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define native_cpuid_reg(reg)					\
static inline unsigned int native_cpuid_##reg(unsigned int op)	\
{								\
	unsigned int eax = op, ebx, ecx = 0, edx;		\
								\
	native_cpuid(&eax, &ebx, &ecx, &edx);			\
								\
	return reg;						\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)
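
/*
 * Example (editor's sketch, not part of the original header): leaf 0
 * returns the 12-byte vendor identification string in EBX, EDX, ECX,
 * in that order (e.g. "GenuineIntel", "AuthenticAMD"). Assumes memcpy()
 * is usable in this context.
 */
static inline void example_cpuid_vendor(char buf[13])
{
	unsigned int eax = 0, ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	memcpy(buf + 0, &ebx, 4);	/* "Genu" / "Auth" */
	memcpy(buf + 4, &edx, 4);	/* "ineI" / "enti" */
	memcpy(buf + 8, &ecx, 4);	/* "ntel" / "cAMD" */
	buf[12] = '\0';
}
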
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0. We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS. When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

#ifdef CONFIG_X86_32
	/*
	 * Space for the temporary SYSENTER stack.
	 */
	unsigned long		SYSENTER_stack_canary;
	unsigned long		SYSENTER_stack[64];
#endif

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
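
/*
 * Example (editor's sketch): on an I/O instruction at CPL > IOPL, the
 * CPU consults this bitmap; a clear bit grants access to the port, a
 * set bit (or a bitmap base beyond the TSS limit) raises #GP. A
 * software equivalent of the single-port check, assuming test_bit()
 * is available here:
 */
static inline bool example_ioport_allowed(const struct tss_struct *tss,
					  unsigned int port)
{
	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET)
		return false;			/* no valid bitmap: denied */

	return !test_bit(port, tss->io_bitmap);	/* 0 = allowed */
}
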
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
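
/*
 * Illustration (editor's note): with the layout above, the canary load
 * that GCC emits for stack-protected functions on x86_64,
 *
 *	mov %gs:40, %rax
 *
 * hits stack_canary, since offsetof(union irq_stack_union, stack_canary)
 * is exactly 40.
 */
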
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cache-aligned:
 * "For Intel Atom processors, avoid non zero segment base address
 * that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * per-CPU IRQ handling stacks
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
	unsigned long		seg;
} mm_segment_t;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

	u32			status;		/* thread synchronous flags */

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short. Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long		fs;
	unsigned long		gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	mm_segment_t		addr_limit;

	unsigned int		sig_on_uaccess_err:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */

	/* Floating point and extended processor state */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
};

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT) */

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
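
/*
 * Example (editor's note): IOPL lives in EFLAGS bits 12-13, i.e.
 * X86_EFLAGS_IOPL == 3 << X86_EFLAGS_IOPL_BIT. Granting a task
 * IOPL 3 therefore looks like
 *
 *	set_iopl_mask(3 << X86_EFLAGS_IOPL_BIT);
 *
 * which is roughly what sys_iopl() does with the level requested by
 * userspace.
 */
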
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
	/* sp0 on x86_32 is special in and around vm86 mode. */
	return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
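
/*
 * Example (editor's sketch): cpuid_count() serves leaves that take a
 * sub-leaf index in ECX, such as leaf 4 (deterministic cache
 * parameters), which is walked until the cache-type field EAX[4:0]
 * reads 0:
 */
static inline int example_count_cache_levels(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	for (i = 0; ; i++) {
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		if (!(eax & 0x1f))	/* cache type 0: no more caches */
			break;
	}

	return i;
}
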
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}
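
/*
 * Example (editor's sketch): the canonical use of cpu_relax() is in a
 * busy-wait loop, where PAUSE both saves power and avoids the memory
 * order machine-clear penalty when the spin finally ends:
 */
static inline void example_spin_until_set(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}
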
/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed. This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
	/*
	 * There are quite a few ways to do this. IRET-to-self is nice
	 * because it works on every CPU, at any CPL (so it's compatible
	 * with paravirtualization), and it never exits to a hypervisor.
	 * The only down sides are that it's a bit slow (it seems to be
	 * a bit more than 2x slower than the fastest options) and that
	 * it unmasks NMIs. The "push %cs" is needed because, in
	 * paravirtual environments, __KERNEL_CS may not be a valid CS
	 * value when we do IRET directly.
	 *
	 * In case NMI unmasking or performance ever becomes a problem,
	 * the next best option appears to be MOV-to-CR2 and an
	 * unconditional jump. That sequence also works on all CPUs,
	 * but it will fault at CPL3 (i.e. Xen PV and lguest).
	 *
	 * CPUID is the conventional way, but it's nasty: it doesn't
	 * exist on some 486-like CPUs, and it usually exits to a
	 * hypervisor.
	 *
	 * Like all of Linux's memory ordering operations, this is a
	 * compiler barrier as well.
	 */
	register void *__sp asm(_ASM_SP);

#ifdef CONFIG_X86_32
	asm volatile (
		"pushfl\n\t"
		"pushl %%cs\n\t"
		"pushl $1f\n\t"
		"iret\n\t"
		"1:"
		: "+r" (__sp) : : "memory");
#else
	unsigned int tmp;

	asm volatile (
		"mov %%ss, %0\n\t"
		"pushq %q0\n\t"
		"pushq %%rsp\n\t"
		"addq $8, (%%rsp)\n\t"
		"pushfq\n\t"
		"mov %%cs, %0\n\t"
		"pushq %q0\n\t"
		"pushq $1f\n\t"
		"iretq\n\t"
		"1:"
		: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
#endif
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long		boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
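
/*
 * Example (editor's note): set_task_blockstep() (declared below) works
 * by toggling DEBUGCTLMSR_BTF (branch-trap flag, bit 1) through this
 * pair; the enable path is essentially:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 */
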
extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
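
/*
 * Example (editor's sketch, with a hypothetical node type): prefetchw()
 * is issued a step ahead of a store so the line arrives already in an
 * exclusive/modifiable state:
 */
struct example_node {			/* hypothetical, for illustration */
	struct example_node	*next;
	unsigned long		count;
};

static inline void example_bump_counters(struct example_node *node)
{
	while (node) {
		if (node->next)
			prefetchw(node->next);	/* fetch next line for write */
		node->count++;
		node = node->next;
	}
}
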
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.addr_limit		= KERNEL_DS,				  \
}

/*
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size. 47 bits minus one guard page. The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at
 * the highest possible canonical userspace address, then that
 * syscall will enter the kernel with a non-canonical return
 * address, and SYSRET will explode dangerously. We avoid this
 * particular problem by preventing anything from being mapped
 * at the maximum canonical address.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
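
/*
 * Editor's note: (1UL << 47) - PAGE_SIZE is 0x00007ffffffff000, so the
 * highest user mapping ends one page short of the 47-bit canonical
 * boundary.
 */
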
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  {						\
	.sp0			= TOP_OF_INIT_STACK,		\
	.addr_limit		= KERNEL_DS,			\
}

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern unsigned long thread_saved_pc(struct task_struct *tsk);

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()		mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
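
/*
 * Example (editor's sketch): a guest probes for a particular hypervisor
 * by signature; KVM, for instance, advertises the well-known 12-byte
 * string "KVMKVMKVM\0\0\0":
 */
static inline bool example_running_on_kvm(void)
{
	return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
}
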
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef	CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */