@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -20,6 +21,7 @@
 
 #include <asm/abi.h>
 #include <asm/mips-cps.h>
+#include <asm/page.h>
 #include <asm/vdso.h>
 
 /* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	vvar_size = gic_size + PAGE_SIZE;
 	size = vvar_size + image->size;
 
+	/*
+	 * Find a region that's large enough for us to perform the
+	 * colour-matching alignment below.
+	 */
+	if (cpu_has_dc_aliases)
+		size += shm_align_mask + 1;
+
 	base = get_unmapped_area(NULL, 0, size, 0, 0);
 	if (IS_ERR_VALUE(base)) {
 		ret = base;
 		goto out;
 	}
 
+	/*
+	 * If we suffer from dcache aliasing, ensure that the VDSO data page
+	 * mapping is coloured the same as the kernel's mapping of that memory.
+	 * This ensures that when the kernel updates the VDSO data userland
+	 * will observe it without requiring cache invalidations.
+	 */
+	if (cpu_has_dc_aliases) {
+		base = __ALIGN_MASK(base, shm_align_mask);
+		base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+	}
+
 	data_addr = base + gic_size;
 	vdso_addr = data_addr + PAGE_SIZE;
 
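
For reference, a standalone sketch of the colour-matching arithmetic added above, not part of the patch itself: it shows how rounding base up to a colour boundary and then adding the colour offset of vdso_data makes the user data page (at base + gic_size) share the kernel mapping's cache colour. The values of shm_align_mask, gic_size, the kernel address of vdso_data and the get_unmapped_area() result are made-up example numbers, and the macro is the expression __ALIGN_MASK() expands to in the kernel headers.

#include <stdio.h>

/* Round x up to the next (mask + 1) boundary, as __ALIGN_MASK() does. */
#define ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(unsigned long)(mask))

int main(void)
{
	unsigned long shm_align_mask = 0x7fff;		/* assumed 32KB colouring */
	unsigned long gic_size = 0x1000;		/* assumed one GIC user page */
	unsigned long kernel_vdso_data = 0x80123440UL;	/* hypothetical &vdso_data */
	unsigned long base = 0x77f60bc0UL;		/* hypothetical get_unmapped_area() result */

	/*
	 * Round up to a colour boundary, then shift the mapping so the data
	 * page at base + gic_size lands on the same colour as vdso_data.
	 */
	base = ALIGN_MASK(base, shm_align_mask);
	base += (kernel_vdso_data - gic_size) & shm_align_mask;

	printf("user data page colour:   %#lx\n", (base + gic_size) & shm_align_mask);
	printf("kernel vdso_data colour: %#lx\n", kernel_vdso_data & shm_align_mask);
	return 0;
}

Because the extra shm_align_mask + 1 bytes were reserved up front, the rounded and offset base still leaves room for the full gic + data + image mapping inside the region returned by get_unmapped_area().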