@@ -1055,8 +1055,8 @@ void __init mem_init(void)
 	after_bootmem = 1;
 
 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
-			 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
+			 PAGE_SIZE, KCORE_OTHER);
 
 	mem_init_print_info(NULL);
 }
@@ -1186,8 +1186,8 @@ int kern_addr_valid(unsigned long addr)
  * not need special handling anymore:
  */
 static struct vm_area_struct gate_vma = {
-	.vm_start	= VSYSCALL_START,
-	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+	.vm_start	= VSYSCALL_ADDR,
+	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
 	.vm_page_prot	= PAGE_READONLY_EXEC,
 	.vm_flags	= VM_READ | VM_EXEC
 };
@@ -1218,7 +1218,7 @@ int in_gate_area(struct mm_struct *mm, unsigned long addr)
  */
 int in_gate_area_no_mm(unsigned long addr)
 {
-	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
+	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
 const char *arch_vma_name(struct vm_area_struct *vma)