/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Enhanced CPU detection and feature setting code by Mike Jagdis
 *  and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeature.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/bootparam.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
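/*
 * Example, assuming the default VMSPLIT_3G layout where __PAGE_OFFSET
 * is 0xC0000000: pa(0xC0100000) == 0x00100000, i.e. the physical
 * address that a symbol linked at 0xC0100000 is actually loaded at.
 */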
/*
 * References to members of the new_cpu_data structure.
 */
#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn, otherwise some page table entries will be wasted.
 */
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

/* Number of possible pages in the lowmem region */
LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)

/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
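/*
 * Worked numbers, assuming the default VMSPLIT_3G layout
 * (__PAGE_OFFSET == 0xC0000000, PAGE_SHIFT == 12):
 *   LOWMEM_PAGES = 0x40000000 >> 12 = 0x40000 pages (1 GiB of lowmem);
 *   non-PAE (PTRS_PER_PGD == 1024): 0x40000/1024 = 256 pages, i.e. 1 MiB;
 *   PAE (PTRS_PER_PMD == 512, PTRS_PER_PGD == 4): 0x40000/512 + 4 =
 *   516 pages, just over 2 MiB.
 */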
/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
__HEAD
ENTRY(startup_32)
	movl pa(stack_start),%ecx

	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $KEEP_SEGMENTS, BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
2:
	leal -__PAGE_OFFSET(%ecx),%esp

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl

/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by the early
 * boot page tables (kexec on panic case).  Hence copy out the parameters
 * before initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

#ifdef CONFIG_MICROCODE_EARLY
	/* Early load ucode on BSP. */
	call load_ucode_bsp
#endif

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base.  The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 */
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode initial_page_table is statically defined to contain
	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries). The identity mapping is handled by pointing two PGD entries
	 * to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at this stage.
	 */
#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
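	/*
	 * Worked values, since each PAE PMD maps 1 GiB: with __PAGE_OFFSET
	 * at 0xC0000000 (VMSPLIT_3G) this is (0x40000000 >> 30) & 3 == 1
	 * kernel PMD; a 2 GiB split gives 2 and a 1 GiB split gives 3.
	 */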
	xorl %ebx,%ebx				/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(initial_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
	movl %ecx,(%edx)			/* Store PMD entry */
						/* Upper half already zero */
	addl $8,%edx
	movl $512,%ecx
11:
	stosl
	xchgl %eax,%ebx
	stosl
	xchgl %eax,%ebx
	addl $0x1000,%eax
	loop 11b

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
1:
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else	/* Not PAE */

page_pde_offset = (__PAGE_OFFSET >> 20);
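/*
 * Worked example: each non-PAE PDE covers 4 MiB (address >> 22) and is
 * 4 bytes wide, so the byte offset of an address's PDE within the page
 * directory is (addr >> 22) * 4 == addr >> 20; with the default
 * __PAGE_OFFSET of 0xC0000000 this gives page_pde_offset == 0xC00.
 */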
	movl $pa(__brk_base), %edi
	movl $pa(initial_page_table), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_page_table+0xffc)
#endif

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb default_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae bad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

bad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

	__INITDATA

subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
	.long default_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
	jmp default_entry
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movl stack_start, %ecx
	movl %ecx, %esp
	jmp *(initial_code)
ENDPROC(start_cpu0)
#endif

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If CPU hotplug is not supported then this code can go into the init
 * section, which will be freed later.
 */
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(stack_start),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp

#ifdef CONFIG_MICROCODE_EARLY
	/* Early load ucode on AP. */
	call load_ucode_ap
#endif

default_entry:
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
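/*
 * For reference, the architectural CR0 bits used here: PE = protected
 * mode, MP = monitor coprocessor, ET = extension type, NE = native x87
 * error reporting, WP = honour page write protection in ring 0,
 * AM = alignment-check mask, PG = paging enable.
 */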
	movl $(CR0_STATE & ~X86_CR0_PG),%eax
	movl %eax,%cr0

/*
 * We want to start out with EFLAGS unambiguously cleared.  Some BIOSes leave
 * bits like NT set.  This would confuse the debugger if this code is traced.
 * So initialize them properly now before switching to protected mode.  That
 * means DF in particular (even though we have cleared it earlier after
 * copying the command line) because GCC expects it.
 */
	pushl $0
	popfl

/*
 * New page tables may be in 4Mbyte page mode and may be using global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!  Specifically, cr4
 * exists if and only if CPUID exists and has flags other than the FPU flag
 * set.
 */
	movl $-1,pa(X86_CPUID)		# preset CPUID level
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx
	popfl				# set EFLAGS=ID
	pushfl
	popl %eax			# get EFLAGS
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remain set?
	jz enable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	xorl %eax,%eax
	cpuid
	movl %eax,pa(X86_CPUID)		# save largest std CPUID function

	movl $1,%eax
	cpuid
	andl $~1,%edx			# Ignore CPUID.FPU
	jz enable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz enable_paging

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja enable_paging
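	/*
	 * The subl/cmpl/ja pair above is the usual unsigned range-check
	 * idiom: after subtracting the lower bound, any value outside
	 * [0x80000001, 0x8000ffff] wraps around and exceeds the single
	 * unsigned limit (upper - lower), so one branch rejects it.
	 */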
	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc enable_paging

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr

enable_paging:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl $CR0_STATE,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	movl setup_once_ref,%eax
	andl %eax,%eax
	jz 1f				# Did we do this already?
	call *%eax
1:

/*
 * Check if it is 486
 */
	movb $4,X86			# at least 486
	cmpl $-1,X86_CPUID
	je is486

	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
	je is486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask the mask (stepping) revision
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY

is486:
	movl $0x50022,%ecx	# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0
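	/*
	 * Decoding the magic constants above: 0x50022 == AM (bit 18) |
	 * WP (bit 16) | NE (bit 5) | MP (bit 1), and the 0x80000011 mask
	 * keeps PG (bit 31), ET (bit 4) and PE (bit 0) of the old %cr0.
	 */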
	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	pushl $0			# fake return address for unwinder
	jmp *(initial_code)

#include "verify_cpu.S"

/*
 * setup_once
 *
 * The setup work we only want to run on the BSP.
 *
 * Warning: %esi is live across this function.
 */
__INIT
setup_once:
	/*
	 * Set up an IDT with 256 interrupt gates that push zero if there
	 * is no error code and then jump to early_idt_handler_common.
	 * It doesn't actually load the IDT - that needs to be done on
	 * each CPU. Interrupts are enabled elsewhere, when we can be
	 * relatively sure everything is ok.
	 */
	movl $idt_table,%edi
	movl $early_idt_handler_array,%eax
	movl $NUM_EXCEPTION_VECTORS,%ecx
1:
	movl %eax,(%edi)
	movl %eax,4(%edi)
	/* interrupt gate, dpl=0, present (encoding decoded below) */
	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
	addl $EARLY_IDT_HANDLER_SIZE,%eax
	addl $8,%edi
	loop 1b
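	/*
	 * A note on the loop above: the two %eax stores drop the handler
	 * address into bytes 0-3 and 4-7, then the misaligned dword store
	 * at offset 2 overwrites bytes 2-5, leaving offset[15:0] at bytes
	 * 0-1, __KERNEL_CS at bytes 2-3, 0x00/0x8E (present, DPL 0, 32-bit
	 * interrupt gate) at bytes 4-5 and offset[31:16] at bytes 6-7.
	 */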
	movl $256 - NUM_EXCEPTION_VECTORS,%ecx
	movl $ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax		/* selector = 0x0010 = cs */
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
2:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	loop 2b

#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * Configure the stack canary. The linker can't handle this by
	 * relocation.  Manually set base address in stack canary
	 * segment descriptor.
	 */
	movl $gdt_page,%eax
	movl $stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
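	/*
	 * For reference: a GDT descriptor scatters its 32-bit base across
	 * the entry - base[15:0] at bytes 2-3, base[23:16] at byte 4 and
	 * base[31:24] at byte 7 - which is exactly what the movw/movb
	 * sequence above patches.
	 */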
#endif

	andl $0,setup_once_ref	/* Once is enough, thanks */
	ret

ENTRY(early_idt_handler_array)
	# 36(%esp) %eflags
	# 32(%esp) %cs
	# 28(%esp) %eip
	# 24(%esp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushl $0		# Dummy error code, to make stack frame uniform
	.endif
	pushl $i		# 20(%esp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
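/*
 * Note: the .fill in each .rept iteration pads every stub to
 * EARLY_IDT_HANDLER_SIZE bytes with 0xcc (int3), so vector N's entry
 * point is always early_idt_handler_array + N*EARLY_IDT_HANDLER_SIZE.
 */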
early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%esp)		# X86_TRAP_NMI
	je is_nmi		# Ignore NMI

	cmpl $2,%ss:early_recursion_flag
	je hlt_loop
	incl %ss:early_recursion_flag

	push %eax		# 16(%esp)
	push %ecx		# 12(%esp)
	push %edx		#  8(%esp)
	push %ds		#  4(%esp)
	push %es		#  0(%esp)
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es

	cmpl $(__KERNEL_CS),32(%esp)
	jne 10f

	leal 28(%esp),%eax	# Pointer to %eip
	call early_fixup_exception
	andl %eax,%eax
	jnz ex_entry		/* found an exception entry */

10:
#ifdef CONFIG_PRINTK
	xorl %eax,%eax
	movw %ax,2(%esp)	/* clean up the segment values on some cpus */
	movw %ax,6(%esp)
	movw %ax,34(%esp)
	leal 40(%esp),%eax
	pushl %eax		/* %esp before the exception */
	pushl %ebx
	pushl %ebp
	pushl %esi
	pushl %edi
	movl %cr2,%eax
	pushl %eax
	pushl (20+6*4)(%esp)	/* trapno */
	pushl $fault_msg
	call printk
#endif
	call dump_stack

hlt_loop:
	hlt
	jmp hlt_loop

ex_entry:
	pop %es
	pop %ds
	pop %edx
	pop %ecx
	pop %eax
	decl %ss:early_recursion_flag

is_nmi:
	addl $8,%esp		/* drop vector number and error code */
	iret
ENDPROC(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret
ENDPROC(ignore_int)

__INITDATA
	.align 4
early_recursion_flag:
	.long 0

__REFDATA
	.align 4
ENTRY(initial_code)
	.long i386_start_kernel
ENTRY(setup_once_ref)
	.long setup_once
/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
#ifdef CONFIG_X86_PAE
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
ENTRY(initial_page_table)
	.fill 1024,4,0
#endif
initial_pg_fixmap:
	.fill 1024,4,0
ENTRY(empty_zero_page)
	.fill 4096,1,0
ENTRY(swapper_pg_dir)
	.fill 1024,4,0

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
ENTRY(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
#endif
.data
.balign 4
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE

__INITRODATA
int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

fault_msg:
/* fault info: */
	.ascii "BUG: Int %d: CR2 %p\n"
/* regs pushed in early_idt_handler: */
	.ascii " EDI %p ESI %p EBP %p EBX %p\n"
	.ascii " ESP %p ES %p DS %p\n"
	.ascii " EDX %p ECX %p EAX %p\n"
/* fault frame: */
	.ascii " vec %p err %p EIP %p CS %p flg %p\n"
	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
	.ascii " %p %p %p %p %p %p %p %p\n"
	.asciz " %p %p %p %p %p %p %p %p\n"

#include "../../x86/xen/xen-head.S"
/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions.  They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size and a 32-bit linear address value:
 */

.data
.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
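/*
 * Decoding the two descriptors above: base 0x00000000 and limit 0xfffff
 * with the 0xc flags nibble (4 KiB granularity, 32-bit), i.e. a flat
 * 4 GiB segment; access byte 0x9a = present, DPL 0, execute/read code,
 * and 0x92 = present, DPL 0, read/write data.
 */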