|
@@ -283,6 +283,55 @@ sigsegv:
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Fixed name for the gate mapping; installed as the ->name vm_op
	 * below (NOTE(review): presumably surfaced to userspace, e.g. in
	 * /proc/<pid>/maps — confirm against the ->name callers).
	 */
	return "[vsyscall]";
}
|
|
|
+static struct vm_operations_struct gate_vma_ops = {
|
|
|
+ .name = gate_vma_name,
|
|
|
+};
|
|
|
/*
 * The one static gate VMA, covering exactly the single 64-bit vsyscall
 * page.  Handed out by get_gate_vma() below rather than being linked
 * into any particular mm.
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_ADDR,
	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,	/* exactly one page */
	.vm_page_prot	= PAGE_READONLY_EXEC,		/* read + exec, no write */
	.vm_flags	= VM_READ | VM_EXEC,
	.vm_ops		= &gate_vma_ops,		/* provides the "[vsyscall]" name */
};
|
|
|
+
|
|
|
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
|
|
|
+{
|
|
|
+#ifdef CONFIG_IA32_EMULATION
|
|
|
+ if (!mm || mm->context.ia32_compat)
|
|
|
+ return NULL;
|
|
|
+#endif
|
|
|
+ return &gate_vma;
|
|
|
+}
|
|
|
+
|
|
|
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
|
|
|
+{
|
|
|
+ struct vm_area_struct *vma = get_gate_vma(mm);
|
|
|
+
|
|
|
+ if (!vma)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ return (addr >= vma->vm_start) && (addr < vma->vm_end);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Use this when you have no reliable mm, typically from interrupt
|
|
|
+ * context. It is less reliable than using a task's mm and may give
|
|
|
+ * false positives.
|
|
|
+ */
|
|
|
+int in_gate_area_no_mm(unsigned long addr)
|
|
|
+{
|
|
|
+ return (addr & PAGE_MASK) == VSYSCALL_ADDR;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Assume __initcall executes before all user space. Hopefully kmod
|
|
|
* doesn't violate that. We'll find out if it does.
|