
[PARISC] Further work for multiple page sizes

More work towards supporting multiple page sizes on 64-bit. Convert
some assumptions that 64bit uses 3-level page tables into tests of
PT_NLEVELS. Also some BUG() to BUG_ON() conversions and some assembler
cleanups.

Signed-off-by: Helge Deller <deller@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
Helge Deller, 19 years ago
parent commit 2fd8303816
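
The recurring pattern in this commit: code that used to test CONFIG_64BIT (or __LP64__) now tests PT_NLEVELS, because a 64-bit kernel with 16KB or 64KB pages covers its address space with a 2-level page table. A minimal C sketch of the selection logic, mirroring the pgtable.h hunk below (simplified; the real version also defines the PGD/PMD orders):

#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define PT_NLEVELS	3	/* pgd -> pmd -> pte */
#else
# define PT_NLEVELS	2	/* pgd -> pte: larger pages need no pmd level */
#endif

#if PT_NLEVELS == 3		/* test the page-table property, not the word size */
/* ... set up or walk the extra pmd level ... */
#endif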

+ 31 - 0
arch/parisc/Kconfig

@@ -138,6 +138,37 @@ config 64BIT
 	  enable this option otherwise. The 64bit kernel is significantly bigger
 	  and slower than the 32bit one.
 
+choice
+	prompt "Kernel page size"
+	default PARISC_PAGE_SIZE_4KB  if !64BIT
+	default PARISC_PAGE_SIZE_4KB  if 64BIT
+#	default PARISC_PAGE_SIZE_16KB if 64BIT
+
+config PARISC_PAGE_SIZE_4KB
+	bool "4KB"
+	help
+	  This lets you select the page size of the kernel.  For best
+	  performance, a page size of 16KB is recommended.  For best
+	  compatibility with 32bit applications, a page size of 4KB should be
+	  selected (the vast majority of 32bit binaries work perfectly fine
+	  with a larger page size).
+
+	  4KB                For best 32bit compatibility
+	  16KB               For best performance
+	  64KB               For best performance, might give more overhead.
+
+	  If you don't know what to do, choose 4KB.
+
+config PARISC_PAGE_SIZE_16KB
+	bool "16KB (EXPERIMENTAL)"
+	depends on PA8X00 && EXPERIMENTAL
+
+config PARISC_PAGE_SIZE_64KB
+	bool "64KB (EXPERIMENTAL)"
+	depends on PA8X00 && EXPERIMENTAL
+
+endchoice
+
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---

+ 3 - 0
arch/parisc/kernel/asm-offsets.c

@@ -288,8 +288,11 @@ int main(void)
 	DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
 	DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
 	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
+	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
 	DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
+	DEFINE(ASM_PAGE_SIZE_DIV64, PAGE_SIZE/64);
+	DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128);
 	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
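
The new ASM_PAGE_SIZE_DIV64/DIV128 constants exist because the C definition of PAGE_SIZE carries a UL suffix the assembler cannot parse, so the division has to happen on the C side. asm-offsets.c is the kernel's usual bridge: each DEFINE() emits a marker into the compiler's assembly output, and a build script rewrites the markers into #defines in the generated asm-offsets.h. A rough sketch of that mechanism (the exact marker format varies by kernel version; this is illustrative, not this commit's code):

/* stand-in for the real <asm/page.h> definition */
#define PAGE_SIZE (1UL << 12)

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	/* Compiled but never run: the build extracts the marker from the
	 * generated .s file and turns "->ASM_PAGE_SIZE_DIV128 32 ..."
	 * into "#define ASM_PAGE_SIZE_DIV128 32" in asm-offsets.h. */
	DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128);
	return 0;
}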

+ 22 - 14
arch/parisc/kernel/entry.S

@@ -502,18 +502,20 @@
 	 * all ILP32 processes and all the kernel for machines with
 	 * under 4GB of memory) */
 	.macro		L3_ptep pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
 	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 	copy		%r0,\pte
-	extrd,u,*=	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	ldw,s		\index(\pgd),\pgd
-	extrd,u,*=	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
-	extrd,u,*=	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	shld		\pgd,PxD_VALUE_SHIFT,\index
-	extrd,u,*=	\va,31,32,%r0
+	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	copy		\index,\pgd
-	extrd,u,*<>	\va,31,32,%r0
+	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
 	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
+#endif
 	L2_ptep		\pgd,\pte,\index,\va,\fault
 	.endm
 
@@ -563,10 +565,18 @@
 	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
 	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
 
-	/* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */
+	/* Enforce uncacheable pages.
+	 * This should ONLY be used for MMIO on PA 2.0 machines.
+	 * Memory/DMA is cache coherent on all PA2.0 machines we support
+	 * (that means T-class is NOT supported) and the memory controllers
+	 * on most of those machines only handle cache transactions.
+	 */
+	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+	depi		1,12,1,\prot
 
-	depd		%r0,63,PAGE_SHIFT,\pte
-	extrd,s		\pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
+	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
 	.endm
 
 	/* Identical macro to make_insert_tlb above, except it
@@ -584,9 +594,8 @@
 
 	/* Get rid of prot bits and convert to page addr for iitlba */
 
-	depi		0,31,PAGE_SHIFT,\pte
+	depi		_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
 	extru		\pte,24,25,\pte
-
 	.endm
 
 	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
@@ -1201,10 +1210,9 @@ intr_save:
 	 */
 
 	/* adjust isr/ior. */
-
-	extrd,u         %r16,63,7,%r1    /* get high bits from isr for ior */
-	depd            %r1,31,7,%r17    /* deposit them into ior */
-	depdi           0,63,7,%r16      /* clear them from isr */
+	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
+	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
+	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
 #endif
 	STREG           %r16, PT_ISR(%r29)
 	STREG           %r17, PT_IOR(%r29)

+ 8 - 7
arch/parisc/kernel/head.S

@@ -76,16 +76,16 @@ $bss_loop:
 	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 	mtctl		%r4,%cr25	/* Initialize user root pointer */
 
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	/* Set pmd in pgd */
 	load32		PA(pmd0),%r5
 	shrd            %r5,PxD_VALUE_SHIFT,%r3	
-        ldo             (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3	
+	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
 	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
 	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
 #else
 	/* 2-level page table, so pmd == pgd */
-        ldo             ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
+	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
 #endif
 
 	/* Fill in pmd with enough pte directories */
@@ -99,7 +99,7 @@ $bss_loop:
 	stw		%r3,0(%r4)
 	ldo		(ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
 	addib,>		-1,%r1,1b
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
 #else
 	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -107,13 +107,14 @@ $bss_loop:
 
 
 	/* Now initialize the PTEs themselves */
-	ldo		_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+	ldo		0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
 	load32		PA(pg0),%r1
 
 $pgt_fill_loop:
 	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
-	ldo		ASM_PAGE_SIZE(%r3),%r3
-	bb,>=		%r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
+	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
+	addib,>		-1,%r11,$pgt_fill_loop
 	nop
 
 	/* Load the return address...er...crash 'n burn */
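
The rewritten $pgt_fill_loop follows from the new pte layout: a pte no longer stores a physical address (previously stepped by ASM_PAGE_SIZE until a bit test tripped) but a PFN shifted to PFN_PTE_SHIFT, so the loop advances by one PFN per entry and counts down an explicit PFN count. A small C model of the arithmetic (illustrative values for the 64-bit/4KB case; not kernel code):

#define PAGE_SHIFT		12
#define PFN_PTE_SHIFT		12
#define KERNEL_INITIAL_ORDER	24	/* 16MB initially mapped */
#define _PAGE_KERNEL		0x1ffUL	/* stand-in for the real prot bits */

static void fill_initial_ptes(unsigned long *ptep)
{
	unsigned long pte   = 0 + _PAGE_KERNEL;	/* phys address 0 upward */
	unsigned long count = 1UL << (KERNEL_INITIAL_ORDER - PAGE_SHIFT);

	while (count--) {
		*ptep++ = pte;
		pte += 1UL << PFN_PTE_SHIFT;	/* advance the PFN field by one */
	}
}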

+ 5 - 5
arch/parisc/kernel/init_task.c

@@ -53,17 +53,17 @@ union thread_union init_thread_union
 	__attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
 		{ INIT_THREAD_INFO(init_task) };
 
-#ifdef __LP64__
+#if PT_NLEVELS == 3
 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
  * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
  * guarantee that global objects will be laid out in memory in the same order 
  * as the order of declaration, so put these in different sections and use
  * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, };
-
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
 #endif
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, };
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte")))  = { {0}, };
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
 
 /*
  * Initial task structure.

+ 12 - 13
arch/parisc/kernel/pacache.S

@@ -65,7 +65,7 @@ flush_tlb_all_local:
 	 */
 
 	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
-	rsm	PSW_SM_I, %r19		/* save I-bit state */
+	rsm		PSW_SM_I, %r19		/* save I-bit state */
 	load32		PA(1f), %r1
 	nop
 	nop
@@ -84,8 +84,7 @@ flush_tlb_all_local:
 	rfi
 	nop
 
-1:      ldil		L%PA(cache_info), %r1
-	ldo		R%PA(cache_info)(%r1), %r1
+1:      load32		PA(cache_info), %r1
 
 	/* Flush Instruction Tlb */
 
@@ -212,8 +211,7 @@ flush_instruction_cache_local:
 	.entry
 
 	mtsp		%r0, %sr1
-	ldil		L%cache_info, %r1
-	ldo		R%cache_info(%r1), %r1
+	load32		cache_info, %r1
 
 	/* Flush Instruction Cache */
 
@@ -254,8 +252,7 @@ flush_data_cache_local:
 	.entry
 
 	mtsp		%r0, %sr1
-	ldil		L%cache_info, %r1
-	ldo		R%cache_info(%r1), %r1
+	load32 		cache_info, %r1
 
 	/* Flush Data Cache */
 
@@ -303,7 +300,8 @@ copy_user_page_asm:
 	 */
 
 	ldd		0(%r25), %r19
-	ldi		32, %r1                 /* PAGE_SIZE/128 == 32 */
+	ldi		ASM_PAGE_SIZE_DIV128, %r1
+
 	ldw		64(%r25), %r0		/* prefetch 1 cacheline ahead */
 	ldw		128(%r25), %r0		/* prefetch 2 */
 
@@ -368,7 +366,7 @@ copy_user_page_asm:
 	 * use ldd/std on a 32 bit kernel.
 	 */
 	ldw		0(%r25), %r19
-	ldi		64, %r1		/* PAGE_SIZE/64 == 64 */
+	ldi		ASM_PAGE_SIZE_DIV64, %r1
 
 1:
 	ldw		4(%r25), %r20
@@ -461,6 +459,7 @@ copy_user_page_asm:
 	sub		%r25, %r1, %r23		/* move physical addr into non shadowed reg */
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
+	/* FIXME for different page sizes != 4k */
 #ifdef CONFIG_64BIT
 	extrd,u		%r26,56,32, %r26		/* convert phys addr to tlb insert format */
 	extrd,u		%r23,56,32, %r23		/* convert phys addr to tlb insert format */
@@ -551,6 +550,7 @@ __clear_user_page_asm:
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
 	depdi		0, 31,32, %r28		/* clear any sign extension */
+	/* FIXME: page size dependent */
 #endif
 	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
 	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
@@ -566,10 +566,10 @@ __clear_user_page_asm:
 	pdtlb		0(%r28)
 
 #ifdef CONFIG_64BIT
-	ldi		32, %r1			/* PAGE_SIZE/128 == 32 */
+	ldi		ASM_PAGE_SIZE_DIV128, %r1
 
 	/* PREFETCH (Write) has not (yet) been proven to help here */
-/* #define	PREFETCHW_OP	ldd		256(%0), %r0 */
+	/* #define	PREFETCHW_OP	ldd		256(%0), %r0 */
 
 1:	std		%r0, 0(%r28)
 	std		%r0, 8(%r28)
@@ -591,8 +591,7 @@ __clear_user_page_asm:
 	ldo		128(%r28), %r28
 
 #else	/* ! CONFIG_64BIT */
-
-	ldi		64, %r1			/* PAGE_SIZE/64 == 64 */
+	ldi		ASM_PAGE_SIZE_DIV64, %r1
 
 1:
 	stw		%r0, 0(%r28)

+ 5 - 5
arch/parisc/kernel/syscall.S

@@ -55,7 +55,7 @@
 	 * pointers.
 	 */
 
-	.align 4096
+	.align ASM_PAGE_SIZE
 linux_gateway_page:
 
         /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
@@ -632,7 +632,7 @@ cas_action:
 end_compare_and_swap:
 
 	/* Make sure nothing else is placed on this page */
-	.align 4096
+	.align ASM_PAGE_SIZE
 	.export end_linux_gateway_page
 end_linux_gateway_page:
 
@@ -652,7 +652,7 @@ end_linux_gateway_page:
 
 	.section .rodata,"a"
 
-	.align 4096
+	.align ASM_PAGE_SIZE
 	/* Light-weight-syscall table */
 	/* Start of lws table. */
 	.export lws_table
@@ -662,14 +662,14 @@ lws_table:
 	LWS_ENTRY(compare_and_swap64)	/* 1 - ELF64 Atomic compare and swap */
 	/* End of lws table */
 
-	.align 4096
+	.align ASM_PAGE_SIZE
 	.export sys_call_table
 .Lsys_call_table:
 sys_call_table:
 #include "syscall_table.S"
 
 #ifdef CONFIG_64BIT
-	.align 4096
+	.align ASM_PAGE_SIZE
 	.export sys_call_table64
 .Lsys_call_table64:
 sys_call_table64:

+ 32 - 22
arch/parisc/kernel/vmlinux.lds.S

@@ -6,6 +6,7 @@
  *    Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
  *    Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
  *    Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
+ *    Copyright (C) 2006 Helge Deller <deller@gmx.de>
  *
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -27,6 +28,7 @@
 /* needed for the processor specific cache alignment size */	
 #include <asm/cache.h>
 #include <asm/page.h>
+#include <asm/asm-offsets.h>
 	
 /* ld script to make hppa Linux kernel */
 #ifndef CONFIG_64BIT
@@ -68,7 +70,7 @@ SECTIONS
   RODATA
 
   /* writeable */
-  . = ALIGN(4096);		/* Make sure this is page aligned so
+  . = ALIGN(ASM_PAGE_SIZE);	/* Make sure this is page aligned so
   				   that we can properly leave these
 				   as writable */
   data_start = .;
@@ -81,23 +83,17 @@ SECTIONS
   __start___unwind = .;         /* unwind info */
   .PARISC.unwind : { *(.PARISC.unwind) }
   __stop___unwind = .;
- 
+
+  /* rarely changed data like cpu maps */
+  . = ALIGN(16);
+  .data.read_mostly : { *(.data.read_mostly) }
+
+  . = ALIGN(L1_CACHE_BYTES);
   .data : {			/* Data */
 	*(.data)
-	*(.data.vm0.pmd)
-	*(.data.vm0.pgd)
-	*(.data.vm0.pte)
 	CONSTRUCTORS
 	}
 
-  . = ALIGN(4096);
-  /* nosave data is really only used for software suspend...it's here
-   * just in case we ever implement it */
-  __nosave_begin = .;
-  .data_nosave : { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
-
   . = ALIGN(L1_CACHE_BYTES);
   .data.cacheline_aligned : { *(.data.cacheline_aligned) }
 
@@ -105,12 +101,29 @@ SECTIONS
   . = ALIGN(16);
   .data.lock_aligned : { *(.data.lock_aligned) }
 
-  /* rarely changed data like cpu maps */
-  . = ALIGN(16);
-  .data.read_mostly : { *(.data.read_mostly) }
+  . = ALIGN(ASM_PAGE_SIZE);
+  /* nosave data is really only used for software suspend...it's here
+   * just in case we ever implement it */
+  __nosave_begin = .;
+  .data_nosave : { *(.data.nosave) }
+  . = ALIGN(ASM_PAGE_SIZE);
+  __nosave_end = .;
 
   _edata = .;			/* End of data section */
 
+  __bss_start = .;		/* BSS */
+  /* page table entries need to be PAGE_SIZE aligned */
+  . = ALIGN(ASM_PAGE_SIZE);
+  .data.vmpages : {
+	*(.data.vm0.pmd)
+	*(.data.vm0.pgd)
+	*(.data.vm0.pte)
+	}
+  .bss : { *(.bss) *(COMMON) }
+  __bss_stop = .;
+
+
+  /* assembler code expects init_task to be 16k aligned */
   . = ALIGN(16384); 		/* init_task */
   .data.init_task : { *(.data.init_task) }
 
@@ -126,6 +139,7 @@ SECTIONS
   .dlt : { *(.dlt) }
 #endif
 
+  /* reserve space for interrupt stack by aligning __init* to 16k */
   . = ALIGN(16384);
   __init_begin = .;
   .init.text : { 
@@ -166,7 +180,7 @@ SECTIONS
      from .altinstructions and .eh_frame */
   .exit.text : { *(.exit.text) }
   .exit.data : { *(.exit.data) }
-  . = ALIGN(4096);
+  . = ALIGN(ASM_PAGE_SIZE);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
   __initramfs_end = .;
@@ -174,14 +188,10 @@ SECTIONS
   __per_cpu_start = .;
   .data.percpu  : { *(.data.percpu) }
   __per_cpu_end = .;
-  . = ALIGN(4096);
+  . = ALIGN(ASM_PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
 	
-  __bss_start = .;		/* BSS */
-  .bss : { *(.bss) *(COMMON) }
-  __bss_stop = .; 
-
   _end = . ;
 
   /* Sections to be discarded */

+ 15 - 13
arch/parisc/mm/init.c

@@ -6,6 +6,7 @@
  *    changed by Philipp Rumpf
  *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  *  Copyright 2004 Randolph Chung (tausq@debian.org)
+ *  Copyright 2006 Helge Deller (deller@gmx.de)
  *
  */
 
@@ -371,8 +372,8 @@ static void __init setup_bootmem(void)
 
 void free_initmem(void)
 {
-	unsigned long addr;
-	
+	unsigned long addr, init_begin, init_end;
+
 	printk(KERN_INFO "Freeing unused kernel memory: ");
 
 #ifdef CONFIG_DEBUG_KERNEL
@@ -395,8 +396,11 @@ void free_initmem(void)
 	local_irq_enable();
 #endif
 	
-	addr = (unsigned long)(&__init_begin);
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+	/* align __init_begin and __init_end to page size,
+	   ignoring linker script where we might have tried to save RAM */
+	init_begin = PAGE_ALIGN((unsigned long)(&__init_begin));
+	init_end   = PAGE_ALIGN((unsigned long)(&__init_end));
+	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
 		free_page(addr);
@@ -407,7 +411,7 @@ void free_initmem(void)
 	/* set up a new led state on systems shipped LED State panel */
 	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
 	
-	printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
+	printk("%luk freed\n", (init_end - init_begin) >> 10);
 }
 
 
@@ -639,11 +643,13 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
 				 * Map the fault vector writable so we can
 				 * write the HPMC checksum.
 				 */
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
 				if (address >= ro_start && address < ro_end
 							&& address != fv_addr
 							&& address != gw_addr)
 				    pte = __mk_pte(address, PAGE_KERNEL_RO);
 				else
+#endif
 				    pte = __mk_pte(address, pgprot);
 
 				if (address >= end_paddr)
@@ -874,8 +880,7 @@ unsigned long alloc_sid(void)
 			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
 			spin_lock(&sid_lock);
 		}
-		if (free_space_ids == 0)
-			BUG();
+		BUG_ON(free_space_ids == 0);
 	}
 
 	free_space_ids--;
@@ -899,8 +904,7 @@ void free_sid(unsigned long spaceid)
 
 	spin_lock(&sid_lock);
 
-	if (*dirty_space_offset & (1L << index))
-	    BUG(); /* attempt to free space id twice */
+	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
 
 	*dirty_space_offset |= (1L << index);
 	dirty_space_ids++;
@@ -975,7 +979,7 @@ static void recycle_sids(void)
 
 static unsigned long recycle_ndirty;
 static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
-static unsigned int recycle_inuse = 0;
+static unsigned int recycle_inuse;
 
 void flush_tlb_all(void)
 {
@@ -984,9 +988,7 @@ void flush_tlb_all(void)
 	do_recycle = 0;
 	spin_lock(&sid_lock);
 	if (dirty_space_ids > RECYCLE_THRESHOLD) {
-	    if (recycle_inuse) {
-		BUG();  /* FIXME: Use a semaphore/wait queue here */
-	    }
+	    BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
 	    get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
 	    recycle_inuse++;
 	    do_recycle++;

+ 21 - 4
include/asm-parisc/page.h

@@ -1,13 +1,30 @@
 #ifndef _PARISC_PAGE_H
 #define _PARISC_PAGE_H
 
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT	12
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-#define PAGE_MASK	(~(PAGE_SIZE-1))
+#if !defined(__KERNEL__)
+/* this is for userspace applications (4k page size) */
+# define PAGE_SHIFT	12	/* 4k */
+# define PAGE_SIZE	(1UL << PAGE_SHIFT)
+# define PAGE_MASK	(~(PAGE_SIZE-1))
+#endif
+
 
 #ifdef __KERNEL__
 #include <linux/config.h>
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define PAGE_SHIFT	12	/* 4k */
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define PAGE_SHIFT	14	/* 16k */
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define PAGE_SHIFT	16	/* 64k */
+#else
+# error "unknown default kernel page size"
+#endif
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+
 #ifndef __ASSEMBLY__
 
 #include <asm/types.h>

+ 44 - 19
include/asm-parisc/pgtable.h

@@ -59,16 +59,15 @@
 #define  ISTACK_SIZE  32768 /* Interrupt Stack Size */
 #define  ISTACK_ORDER 3
 
-/* This is the size of the initially mapped kernel memory (i.e. currently
- * 0 to 1<<23 == 8MB */
+/* This is the size of the initially mapped kernel memory */
 #ifdef CONFIG_64BIT
-#define KERNEL_INITIAL_ORDER	24
+#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
 #else
-#define KERNEL_INITIAL_ORDER	23
+#define KERNEL_INITIAL_ORDER	23	/* 0 to 1<<23 = 8MB */
 #endif
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
 #define PT_NLEVELS	3
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PMD_ORDER	1 /* Number of pages per pmd */
@@ -111,11 +110,15 @@
 #define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
 #define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
 
-#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
+#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
 
 /* This calculates the number of initial pages we need for the initial
  * page tables */
-#define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
+# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#else
+# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
+#endif
 
 /*
  * pgd entries used up by user/kernel:
@@ -160,6 +163,10 @@ extern  void *vmalloc_start;
  * to zero */
 #define PTE_SHIFT	   	xlate_pabit(_PAGE_USER_BIT)
 
+/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
+#define PFN_PTE_SHIFT		12
+
+
 /* this is how many bits may be used by the file functions */
 #define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)
 
@@ -188,7 +195,8 @@ extern  void *vmalloc_start;
 /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
  * are page-aligned, we don't care about the PAGE_OFFSET bits, except
  * for a few meta-information bits, so we shift the address to be
- * able to effectively address 40-bits of physical address space. */
+ * able to effectively address 40/42/44-bits of physical address space
+ * depending on 4k/16k/64k PAGE_SIZE */
 #define _PxD_PRESENT_BIT   31
 #define _PxD_ATTACHED_BIT  30
 #define _PxD_VALID_BIT     29
@@ -198,7 +206,7 @@ extern  void *vmalloc_start;
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
-#define PxD_VALUE_SHIFT   (8)
+#define PxD_VALUE_SHIFT   (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
 
 #ifndef __ASSEMBLY__
 
@@ -246,6 +254,7 @@ extern  void *vmalloc_start;
 #define __S110  PAGE_RWX
 #define __S111  PAGE_RWX
 
+
 extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
 
 /* initial page tables for 0-8MB for kernel */
@@ -272,7 +281,7 @@ extern unsigned long *empty_zero_page;
 #define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
 #define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 /* The first entry of the permanent pmd is not there if it contains
  * the gateway marker */
 #define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
@@ -282,7 +291,7 @@ extern unsigned long *empty_zero_page;
 #define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
 #define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pmd_clear(pmd_t *pmd) {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
 		/* This is the entry pointing to the permanent pmd
 		 * attached to the pgd; cannot clear it */
@@ -303,7 +312,7 @@ static inline void pmd_clear(pmd_t *pmd) {
 #define pgd_bad(x)      (!(pgd_flag(x) & PxD_FLAG_VALID))
 #define pgd_present(x)  (pgd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pgd_clear(pgd_t *pgd) {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
 		/* This is the permanent pmd attached to the pgd; cannot
 		 * free it */
@@ -351,7 +360,7 @@ extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return
 ({									\
 	pte_t __pte;							\
 									\
-	pte_val(__pte) = ((addr)+pgprot_val(pgprot));			\
+	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
 									\
 	__pte;								\
 })
@@ -361,20 +370,16 @@ extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 {
 	pte_t pte;
-	pte_val(pte) = (pfn << PAGE_SHIFT) | pgprot_val(pgprot);
+	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
 	return pte;
 }
 
-/* This takes a physical page address that is used by the remapping functions */
-#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
-
 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
 
 /* Permanent address of a page.  On parisc we don't have highmem. */
 
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
+#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)
 
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
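
With the PFN field fixed at bit 12 (PFN_PTE_SHIFT) instead of at PAGE_SHIFT, the low 12 bits of a pte always hold the protection bits, whether pages are 4KB, 16KB or 64KB. A self-contained check of the round-trip under an assumed 16KB configuration (a model with hypothetical helper names, not kernel code):

#include <assert.h>

#define PAGE_SHIFT	14	/* 16KB pages */
#define PFN_PTE_SHIFT	12	/* fixed, independent of PAGE_SHIFT */

typedef unsigned long pteval_t;

static pteval_t mk_pte_val(unsigned long pfn, pteval_t prot)
{
	return (pfn << PFN_PTE_SHIFT) | prot;	/* as in pfn_pte() above */
}

static unsigned long pte_to_pfn(pteval_t pte)
{
	return pte >> PFN_PTE_SHIFT;		/* as in pte_pfn() above */
}

int main(void)
{
	unsigned long pfn = 0x12345;
	pteval_t prot = 0x3ff;			/* prot bits stay below bit 12 */

	assert(pte_to_pfn(mk_pte_val(pfn, prot)) == pfn);
	return 0;
}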
 
@@ -499,6 +504,26 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 #endif /* !__ASSEMBLY__ */
 
+
+/* TLB page size encoding - see table 3-1 in parisc20.pdf */
+#define _PAGE_SIZE_ENCODING_4K		0
+#define _PAGE_SIZE_ENCODING_16K	1
+#define _PAGE_SIZE_ENCODING_64K	2
+#define _PAGE_SIZE_ENCODING_256K	3
+#define _PAGE_SIZE_ENCODING_1M		4
+#define _PAGE_SIZE_ENCODING_4M		5
+#define _PAGE_SIZE_ENCODING_16M	6
+#define _PAGE_SIZE_ENCODING_64M	7
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
+#endif
+
+
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
 		remap_pfn_range(vma, vaddr, pfn, size, prot)