Merge branch 'parisc-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:

The bulk of this is optimized page copying/clearing and cache flushing
(virtual caches are lovely) by John David Anglin.

* 'parisc-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (31 commits)
  arch/parisc/include/asm: use ARRAY_SIZE macro in mmzone.h
  parisc: remove empty lines and unnecessary #ifdef coding in include/asm/signal.h
  parisc: sendfile and sendfile64 syscall cleanups
  parisc: switch to available compat_sched_rr_get_interval implementation
  parisc: fix fallocate syscall
  parisc: fix error return codes for rt_sigaction and rt_sigprocmask
  parisc: convert msgrcv and msgsnd syscalls to use compat layer
  parisc: correctly wire up mq_* functions for CONFIG_COMPAT case
  parisc: fix personality on 32bit kernel
  parisc: wire up process_vm_readv, process_vm_writev, kcmp and finit_module syscalls
  parisc: led driver requires CONFIG_VM_EVENT_COUNTERS
  parisc: remove unused compat_rt_sigframe.h header
  parisc/mm/fault.c: Port OOM changes to do_page_fault
  parisc: space register variables need to be in native length (unsigned long)
  parisc: fix ptrace breakage
  parisc: always detect multiple physical ranges
  parisc: ensure that mmapped shared pages are aligned at SHMLBA addresses
  parisc: disable preemption while flushing D- or I-caches through TMPALIAS region
  parisc: remove IRQF_DISABLED
  parisc: fixes and cleanups in page cache flushing (4/4)
  ...
Linus Torvalds (12 years ago), commit 5f32ed140d
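
One theme of this series is that PA-RISC data caches are virtually indexed, so two virtual mappings of one physical page stay coherent only if they agree in the cache-index bits, i.e. are congruent modulo SHMLBA. A minimal stand-alone C sketch of the alignment rule that the sys_parisc.c change below enforces (not part of the patch; the SHMLBA and PAGE_SHIFT values are assumed for illustration):

    #include <stdio.h>

    #define SHMLBA_EX     0x400000UL /* assumed cache-congruence size, illustration only */
    #define PAGE_SHIFT_EX 12         /* assumed 4k pages */

    /* Mirrors the new MAP_FIXED|MAP_SHARED test in arch_get_unmapped_area():
     * the requested address, corrected for the file offset, must keep the
     * mapping congruent modulo SHMLBA, or the two views of the page would
     * land in different cache lines and lose coherency. */
    static int shared_mapping_aligned(unsigned long addr, unsigned long pgoff)
    {
        return ((addr - (pgoff << PAGE_SHIFT_EX)) & (SHMLBA_EX - 1)) == 0;
    }

    int main(void)
    {
        printf("%d\n", shared_mapping_aligned(0x40400000UL, 0)); /* 1: congruent */
        printf("%d\n", shared_mapping_aligned(0x40001000UL, 0)); /* 0: would alias */
        return 0;
    }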

+ 21 - 0
arch/parisc/Kconfig

@@ -160,6 +160,23 @@ config PREFETCH
 	def_bool y
 	depends on PA8X00 || PA7200
 
+config MLONGCALLS
+	bool "Enable the -mlong-calls compiler option for big kernels"
+	def_bool y if (!MODULES)
+	depends on PA8X00
+	help
+	  If you configure the kernel to include many drivers built-in instead
+	  as modules, the kernel executable may become too big, so that the
+	  linker will not be able to resolve some long branches and fails to link
+	  your vmlinux kernel. In that case enabling this option will help you
+	  to overcome this limit by using the -mlong-calls compiler option.
+
+	  Usually you want to say N here, unless you e.g. want to build
+	  a kernel which includes all necessary drivers built-in and which can
+	  be used for TFTP booting without the need to have an initrd ramdisk.
+
+	  Enabling this option will probably slow down your kernel.
+
 config 64BIT
 	bool "64-bit kernel"
 	depends on PA8X00
@@ -254,6 +271,10 @@ config COMPAT
 	def_bool y
 	depends on 64BIT
 
+config SYSVIPC_COMPAT
+	def_bool y
+	depends on COMPAT && SYSVIPC
+
 config HPUX
 	bool "Support for HP-UX binaries"
 	depends on !64BIT

+ 8 - 5
arch/parisc/Makefile

@@ -32,11 +32,6 @@ ifdef CONFIG_64BIT
 UTS_MACHINE	:= parisc64
 CHECKFLAGS	+= -D__LP64__=1 -m64
 WIDTH		:= 64
-
-# FIXME: if no default set, should really try to locate dynamically
-ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE	:= hppa64-linux-gnu-
-endif
 else # 32-bit
 WIDTH		:=
 endif
@@ -44,6 +39,10 @@ endif
 # attempt to help out folks who are cross-compiling
 ifeq ($(NATIVE),1)
 CROSS_COMPILE	:= hppa$(WIDTH)-linux-
+else
+ ifeq ($(CROSS_COMPILE),)
+ CROSS_COMPILE	:= hppa$(WIDTH)-linux-gnu-
+ endif
 endif
 
 OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
@@ -65,6 +64,10 @@ ifndef CONFIG_FUNCTION_TRACER
   cflags-y	+= -ffunction-sections
 endif
 
+# Use long jumps instead of long branches (needed if your linker fails to
+# link a too big vmlinux executable)
+cflags-$(CONFIG_MLONGCALLS)	+= -mlong-calls
+
 # select which processor to optimise for
 cflags-$(CONFIG_PA7100)		+= -march=1.1 -mschedule=7100
 cflags-$(CONFIG_PA7200)		+= -march=1.1 -mschedule=7200

+ 1 - 2
arch/parisc/hpux/fs.c

@@ -43,8 +43,7 @@ int hpux_execve(struct pt_regs *regs)
 
 	error = do_execve(filename->name,
 			  (const char __user *const __user *) regs->gr[25],
-			  (const char __user *const __user *) regs->gr[24],
-			  regs);
+			  (const char __user *const __user *) regs->gr[24]);
 
 	putname(filename);
 

+ 2 - 0
arch/parisc/include/asm/cacheflush.h

@@ -115,7 +115,9 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
 {
 	if (PageAnon(page)) {
 		flush_tlb_page(vma, vmaddr);
+		preempt_disable();
 		flush_dcache_page_asm(page_to_phys(page), vmaddr);
+		preempt_enable();
 	}
 }
 

+ 61 - 0
arch/parisc/include/asm/compat.h

@@ -28,6 +28,7 @@ typedef u16	compat_nlink_t;
 typedef u16	compat_ipc_pid_t;
 typedef s32	compat_daddr_t;
 typedef u32	compat_caddr_t;
+typedef s32	compat_key_t;
 typedef s32	compat_timer_t;
 
 typedef s32	compat_int_t;
@@ -188,6 +189,66 @@ typedef struct compat_siginfo {
 #define COMPAT_OFF_T_MAX	0x7fffffff
 #define COMPAT_LOFF_T_MAX	0x7fffffffffffffffL
 
+struct compat_ipc64_perm {
+	compat_key_t key;
+	__compat_uid_t uid;
+	__compat_gid_t gid;
+	__compat_uid_t cuid;
+	__compat_gid_t cgid;
+	unsigned short int __pad1;
+	compat_mode_t mode;
+	unsigned short int __pad2;
+	unsigned short int seq;
+	unsigned int __pad3;
+	unsigned long __unused1;	/* yes they really are 64bit pads */
+	unsigned long __unused2;
+};
+
+struct compat_semid64_ds {
+	struct compat_ipc64_perm sem_perm;
+	compat_time_t sem_otime;
+	unsigned int __unused1;
+	compat_time_t sem_ctime;
+	unsigned int __unused2;
+	compat_ulong_t sem_nsems;
+	compat_ulong_t __unused3;
+	compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+	struct compat_ipc64_perm msg_perm;
+	unsigned int __unused1;
+	compat_time_t msg_stime;
+	unsigned int __unused2;
+	compat_time_t msg_rtime;
+	unsigned int __unused3;
+	compat_time_t msg_ctime;
+	compat_ulong_t msg_cbytes;
+	compat_ulong_t msg_qnum;
+	compat_ulong_t msg_qbytes;
+	compat_pid_t msg_lspid;
+	compat_pid_t msg_lrpid;
+	compat_ulong_t __unused4;
+	compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+	struct compat_ipc64_perm shm_perm;
+	unsigned int __unused1;
+	compat_time_t shm_atime;
+	unsigned int __unused2;
+	compat_time_t shm_dtime;
+	unsigned int __unused3;
+	compat_time_t shm_ctime;
+	unsigned int __unused4;
+	compat_size_t shm_segsz;
+	compat_pid_t shm_cpid;
+	compat_pid_t shm_lpid;
+	compat_ulong_t shm_nattch;
+	compat_ulong_t __unused5;
+	compat_ulong_t __unused6;
+};
+
 /*
  * A pointer passed in from user mode. This should not
  * be used for syscall parameters, just declare them

+ 0 - 50
arch/parisc/include/asm/compat_rt_sigframe.h

@@ -1,50 +0,0 @@
-#include <linux/compat.h>
-#include <linux/compat_siginfo.h>
-#include <asm/compat_ucontext.h>
-
-#ifndef _ASM_PARISC_COMPAT_RT_SIGFRAME_H
-#define _ASM_PARISC_COMPAT_RT_SIGFRAME_H
-
-/* In a deft move of uber-hackery, we decide to carry the top half of all
- * 64-bit registers in a non-portable, non-ABI, hidden structure.
- * Userspace can read the hidden structure if it *wants* but is never
- * guaranteed to be in the same place. Infact the uc_sigmask from the 
- * ucontext_t structure may push the hidden register file downards
- */
-struct compat_regfile {
-	/* Upper half of all the 64-bit registers that were truncated
-	   on a copy to a 32-bit userspace */
-	compat_int_t rf_gr[32];
-	compat_int_t rf_iasq[2];
-	compat_int_t rf_iaoq[2];
-	compat_int_t rf_sar;
-};
-
-#define COMPAT_SIGRETURN_TRAMP 4
-#define COMPAT_SIGRESTARTBLOCK_TRAMP 5 
-#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + COMPAT_SIGRESTARTBLOCK_TRAMP)
-
-struct compat_rt_sigframe {
-	/* XXX: Must match trampoline size in arch/parisc/kernel/signal.c 
-	        Secondary to that it must protect the ERESTART_RESTARTBLOCK
-		trampoline we left on the stack (we were bad and didn't 
-		change sp so we could run really fast.) */
-	compat_uint_t tramp[COMPAT_TRAMP_SIZE];
-	compat_siginfo_t info;
-	struct compat_ucontext uc;
-	/* Hidden location of truncated registers, *must* be last. */
-	struct compat_regfile regs; 
-};
-
-/*
- * The 32-bit ABI wants at least 48 bytes for a function call frame:
- * 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of
- * which Linux/parisc uses is sp-20 for the saved return pointer...)
- * Then, the stack pointer must be rounded to a cache line (64 bytes).
- */
-#define SIGFRAME32		64
-#define FUNCTIONCALLFRAME32	48
-#define PARISC_RT_SIGFRAME_SIZE32					\
-	(((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32)
-
-#endif

+ 1 - 1
arch/parisc/include/asm/elf.h

@@ -247,7 +247,7 @@ typedef unsigned long elf_greg_t;
 #define ELF_PLATFORM  ("PARISC\0")
 
 #define SET_PERSONALITY(ex) \
-	current->personality = PER_LINUX; \
+	set_personality((current->personality & ~PER_MASK) | PER_LINUX); \
 	current->thread.map_base = DEFAULT_MAP_BASE; \
 	current->thread.task_size = DEFAULT_TASK_SIZE \
 

+ 2 - 2
arch/parisc/include/asm/floppy.h

@@ -157,10 +157,10 @@ static int fd_request_irq(void)
 {
 	if(can_use_virtual_dma)
 		return request_irq(FLOPPY_IRQ, floppy_hardint,
-				   IRQF_DISABLED, "floppy", NULL);
+				   0, "floppy", NULL);
 	else
 		return request_irq(FLOPPY_IRQ, floppy_interrupt,
-				   IRQF_DISABLED, "floppy", NULL);
+				   0, "floppy", NULL);
 }
 
 static unsigned long dma_mem_alloc(unsigned long size)

+ 3 - 4
arch/parisc/include/asm/mmzone.h

@@ -1,9 +1,10 @@
 #ifndef _PARISC_MMZONE_H
 #define _PARISC_MMZONE_H
 
+#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+
 #ifdef CONFIG_DISCONTIGMEM
 
-#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
 extern int npmem_ranges;
 
 struct node_map_data {
@@ -44,7 +45,7 @@ static inline int pfn_to_nid(unsigned long pfn)
 		return 0;
 
 	i = pfn >> PFNNID_SHIFT;
-	BUG_ON(i >= sizeof(pfnnid_map) / sizeof(pfnnid_map[0]));
+	BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
 	r = pfnnid_map[i];
 	BUG_ON(r == 0xff);
 
@@ -60,7 +61,5 @@ static inline int pfn_valid(int pfn)
 	return 0;
 }
 
-#else /* !CONFIG_DISCONTIGMEM */
-#define MAX_PHYSMEM_RANGES 	1 
 #endif
 #endif /* _PARISC_MMZONE_H */

+ 16 - 4
arch/parisc/include/asm/page.h

@@ -21,15 +21,27 @@
 #include <asm/types.h>
 #include <asm/cache.h>
 
-#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from)      copy_user_page_asm((void *)(to), (void *)(from))
+#define clear_page(page)	clear_page_asm((void *)(page))
+#define copy_page(to, from)	copy_page_asm((void *)(to), (void *)(from))
 
 struct page;
 
-void copy_user_page_asm(void *to, void *from);
+void clear_page_asm(void *page);
+void copy_page_asm(void *to, void *from);
+void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 			   struct page *pg);
-void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+
+/* #define CONFIG_PARISC_TMPALIAS */
+
+#ifdef CONFIG_PARISC_TMPALIAS
+void clear_user_highpage(struct page *page, unsigned long vaddr);
+#define clear_user_highpage clear_user_highpage
+struct vm_area_struct;
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+#endif
 
 /*
  * These are used to make use of C type-checking..

+ 10 - 3
arch/parisc/include/asm/pgtable.h

@@ -12,11 +12,10 @@
 
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
+#include <linux/mm_types.h>
 #include <asm/processor.h>
 #include <asm/cache.h>
 
-struct vm_area_struct;
-
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
  * memory.  For the return value to be meaningful, ADDR must be >=
@@ -40,7 +39,14 @@ struct vm_area_struct;
         do{                                                     \
                 *(pteptr) = (pteval);                           \
         } while(0)
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+
+#define set_pte_at(mm, addr, ptep, pteval)                      \
+	do {                                                    \
+		set_pte(ptep, pteval);                          \
+		purge_tlb_entries(mm, addr);                    \
+	} while (0)
 
 #endif /* !__ASSEMBLY__ */
 
@@ -466,6 +472,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 		old = pte_val(*ptep);
 		new = pte_val(pte_wrprotect(__pte (old)));
 	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
+	purge_tlb_entries(mm, addr);
 #else
 	pte_t old_pte = *ptep;
 	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));

+ 0 - 4
arch/parisc/include/asm/signal.h

@@ -3,16 +3,12 @@
 
 #include <uapi/asm/signal.h>
 
-
 #define _NSIG		64
 /* bits-per-word, where word apparently means 'long' not 'int' */
 #define _NSIG_BPW	BITS_PER_LONG
 #define _NSIG_WORDS	(_NSIG / _NSIG_BPW)
 
 # ifndef __ASSEMBLY__
-#ifdef CONFIG_64BIT
-#else
-#endif
 
 /* Most things should be clean enough to redefine this at will, if care
    is taken to make libc match.  */

+ 2 - 0
arch/parisc/include/asm/unistd.h

@@ -149,6 +149,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)	\
 #define __ARCH_WANT_SYS_SIGNAL
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
 #define __ARCH_WANT_SYS_UTIME
 #define __ARCH_WANT_SYS_WAITPID
 #define __ARCH_WANT_SYS_SOCKETCALL
@@ -166,6 +167,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)	\
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
 #define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_COMPAT_SYS_SENDFILE
 
 #endif /* __ASSEMBLY__ */
 

+ 5 - 1
arch/parisc/include/uapi/asm/unistd.h

@@ -822,8 +822,12 @@
 #define __NR_syncfs		(__NR_Linux + 327)
 #define __NR_setns		(__NR_Linux + 328)
 #define __NR_sendmmsg		(__NR_Linux + 329)
+#define __NR_process_vm_readv	(__NR_Linux + 330)
+#define __NR_process_vm_writev	(__NR_Linux + 331)
+#define __NR_kcmp		(__NR_Linux + 332)
+#define __NR_finit_module	(__NR_Linux + 333)
 
-#define __NR_Linux_syscalls	(__NR_sendmmsg + 1)
+#define __NR_Linux_syscalls	(__NR_finit_module + 1)
 
 
 #define __IGNORE_select		/* newselect */

+ 186 - 35
arch/parisc/kernel/cache.c

@@ -267,9 +267,11 @@ static inline void
 __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 		   unsigned long physaddr)
 {
+	preempt_disable();
 	flush_dcache_page_asm(physaddr, vmaddr);
 	if (vma->vm_flags & VM_EXEC)
 		flush_icache_page_asm(physaddr, vmaddr);
+	preempt_enable();
 }
 
 void flush_dcache_page(struct page *page)
@@ -329,17 +331,6 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
 EXPORT_SYMBOL(flush_data_cache_local);
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 
-void clear_user_page_asm(void *page, unsigned long vaddr)
-{
-	unsigned long flags;
-	/* This function is implemented in assembly in pacache.S */
-	extern void __clear_user_page_asm(void *page, unsigned long vaddr);
-
-	purge_tlb_start(flags);
-	__clear_user_page_asm(page, vaddr);
-	purge_tlb_end(flags);
-}
-
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
 int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
 
@@ -373,20 +364,9 @@ void __init parisc_setup_cache_timing(void)
 	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
 }
 
-extern void purge_kernel_dcache_page(unsigned long);
-extern void clear_user_page_asm(void *page, unsigned long vaddr);
-
-void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
-{
-	unsigned long flags;
-
-	purge_kernel_dcache_page((unsigned long)page);
-	purge_tlb_start(flags);
-	pdtlb_kernel(page);
-	purge_tlb_end(flags);
-	clear_user_page_asm(page, vaddr);
-}
-EXPORT_SYMBOL(clear_user_page);
+extern void purge_kernel_dcache_page_asm(unsigned long);
+extern void clear_user_page_asm(void *, unsigned long);
+extern void copy_user_page_asm(void *, void *, unsigned long);
 
 void flush_kernel_dcache_page_addr(void *addr)
 {
@@ -399,11 +379,26 @@ void flush_kernel_dcache_page_addr(void *addr)
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
+void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
+{
+	clear_page_asm(vto);
+	if (!parisc_requires_coherency())
+		flush_kernel_dcache_page_asm(vto);
+}
+EXPORT_SYMBOL(clear_user_page);
+
 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-		    struct page *pg)
+	struct page *pg)
 {
-	/* no coherency needed (all in kmap/kunmap) */
-	copy_user_page_asm(vto, vfrom);
+	/* Copy using kernel mapping.  No coherency is needed
+	   (all in kmap/kunmap) on machines that don't support
+	   non-equivalent aliasing.  However, the `from' page
+	   needs to be flushed before it can be accessed through
+	   the kernel mapping. */
+	preempt_disable();
+	flush_dcache_page_asm(__pa(vfrom), vaddr);
+	preempt_enable();
+	copy_page_asm(vto, vfrom);
 	if (!parisc_requires_coherency())
 		flush_kernel_dcache_page_asm(vto);
 }
@@ -419,6 +414,24 @@ void kunmap_parisc(void *addr)
 EXPORT_SYMBOL(kunmap_parisc);
 #endif
 
+void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+{
+	unsigned long flags;
+
+	/* Note: purge_tlb_entries can be called at startup with
+	   no context.  */
+
+	/* Disable preemption while we play with %sr1.  */
+	preempt_disable();
+	mtsp(mm->context, 1);
+	purge_tlb_start(flags);
+	pdtlb(addr);
+	pitlb(addr);
+	purge_tlb_end(flags);
+	preempt_enable();
+}
+EXPORT_SYMBOL(purge_tlb_entries);
+
 void __flush_tlb_range(unsigned long sid, unsigned long start,
 		       unsigned long end)
 {
@@ -458,8 +471,66 @@ void flush_cache_all(void)
 	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
 }
 
+static inline unsigned long mm_total_size(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+	unsigned long usize = 0;
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		usize += vma->vm_end - vma->vm_start;
+	return usize;
+}
+
+static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
+{
+	pte_t *ptep = NULL;
+
+	if (!pgd_none(*pgd)) {
+		pud_t *pud = pud_offset(pgd, addr);
+		if (!pud_none(*pud)) {
+			pmd_t *pmd = pmd_offset(pud, addr);
+			if (!pmd_none(*pmd))
+				ptep = pte_offset_map(pmd, addr);
+		}
+	}
+	return ptep;
+}
+
 void flush_cache_mm(struct mm_struct *mm)
 {
+	/* Flushing the whole cache on each cpu takes forever on
+	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
+	if (mm_total_size(mm) < parisc_cache_flush_threshold) {
+		struct vm_area_struct *vma;
+
+		if (mm->context == mfsp(3)) {
+			for (vma = mm->mmap; vma; vma = vma->vm_next) {
+				flush_user_dcache_range_asm(vma->vm_start,
+					vma->vm_end);
+				if (vma->vm_flags & VM_EXEC)
+					flush_user_icache_range_asm(
+					  vma->vm_start, vma->vm_end);
+			}
+		} else {
+			pgd_t *pgd = mm->pgd;
+
+			for (vma = mm->mmap; vma; vma = vma->vm_next) {
+				unsigned long addr;
+
+				for (addr = vma->vm_start; addr < vma->vm_end;
+				     addr += PAGE_SIZE) {
+					pte_t *ptep = get_ptep(pgd, addr);
+					if (ptep != NULL) {
+						pte_t pte = *ptep;
+						__flush_cache_page(vma, addr,
+						  page_to_phys(pte_page(pte)));
+					}
+				}
+			}
+		}
+		return;
+	}
+
 #ifdef CONFIG_SMP
 	flush_cache_all();
 #else
@@ -485,20 +556,36 @@ flush_user_icache_range(unsigned long start, unsigned long end)
 		flush_instruction_cache();
 }
 
-
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	int sr3;
-
 	BUG_ON(!vma->vm_mm->context);
 
-	sr3 = mfsp(3);
-	if (vma->vm_mm->context == sr3) {
-		flush_user_dcache_range(start,end);
-		flush_user_icache_range(start,end);
+	if ((end - start) < parisc_cache_flush_threshold) {
+		if (vma->vm_mm->context == mfsp(3)) {
+			flush_user_dcache_range_asm(start, end);
+			if (vma->vm_flags & VM_EXEC)
+				flush_user_icache_range_asm(start, end);
+		} else {
+			unsigned long addr;
+			pgd_t *pgd = vma->vm_mm->pgd;
+
+			for (addr = start & PAGE_MASK; addr < end;
+			     addr += PAGE_SIZE) {
+				pte_t *ptep = get_ptep(pgd, addr);
+				if (ptep != NULL) {
+					pte_t pte = *ptep;
+					flush_cache_page(vma,
+					   addr, pte_pfn(pte));
+				}
+			}
+		}
 	} else {
+#ifdef CONFIG_SMP
 		flush_cache_all();
+#else
+		flush_cache_all_local();
+#endif
 	}
 }
 
@@ -511,3 +598,67 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
 
 }
+
+#ifdef CONFIG_PARISC_TMPALIAS
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *vto;
+	unsigned long flags;
+
+	/* Clear using TMPALIAS region.  The page doesn't need to
+	   be flushed but the kernel mapping needs to be purged.  */
+
+	vto = kmap_atomic(page, KM_USER0);
+
+	/* The PA-RISC 2.0 Architecture book states on page F-6:
+	   "Before a write-capable translation is enabled, *all*
+	   non-equivalently-aliased translations must be removed
+	   from the page table and purged from the TLB.  (Note
+	   that the caches are not required to be flushed at this
+	   time.)  Before any non-equivalent aliased translation
+	   is re-enabled, the virtual address range for the writeable
+	   page (the entire page) must be flushed from the cache,
+	   and the write-capable translation removed from the page
+	   table and purged from the TLB."  */
+
+	purge_kernel_dcache_page_asm((unsigned long)vto);
+	purge_tlb_start(flags);
+	pdtlb_kernel(vto);
+	purge_tlb_end(flags);
+	preempt_disable();
+	clear_user_page_asm(vto, vaddr);
+	preempt_enable();
+
+	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
+{
+	void *vfrom, *vto;
+	unsigned long flags;
+
+	/* Copy using TMPALIAS region.  This has the advantage
+	   that the `from' page doesn't need to be flushed.  However,
+	   the `to' page must be flushed in copy_user_page_asm since
+	   it can be used to bring in executable code.  */
+
+	vfrom = kmap_atomic(from, KM_USER0);
+	vto = kmap_atomic(to, KM_USER1);
+
+	purge_kernel_dcache_page_asm((unsigned long)vto);
+	purge_tlb_start(flags);
+	pdtlb_kernel(vto);
+	pdtlb_kernel(vfrom);
+	purge_tlb_end(flags);
+	preempt_disable();
+	copy_user_page_asm(vto, vfrom, vaddr);
+	flush_dcache_page_asm(__pa(vto), vaddr);
+	preempt_enable();
+
+	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
+	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
+}
+
+#endif /* CONFIG_PARISC_TMPALIAS */

+ 2 - 2
arch/parisc/kernel/entry.S

@@ -483,7 +483,7 @@
 	 * B <-> _PAGE_DMB (memory break)
 	 *
 	 * Then incredible subtlety: The access rights are
-	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
+	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
 	 * See 3-14 of the parisc 2.0 manual
 	 *
 	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
@@ -493,7 +493,7 @@
 
 	/* PAGE_USER indicates the page can be read with user privileges,
 	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
-	 * contains _PAGE_READ */
+	 * contains _PAGE_READ) */
 	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
 	depdi		7,11,3,\prot
 	/* If we're a gateway page, drop PL2 back to zero for promotion

+ 2 - 0
arch/parisc/kernel/inventory.c

@@ -186,12 +186,14 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 
 	if (status != PDC_OK) {
 		/* no more cell modules or error */
+		kfree(pa_pdc_cell);
 		return status;
 	}
 
 	temp = pa_pdc_cell->cba;
 	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
 	if (!dev) {
+		kfree(pa_pdc_cell);
 		return PDC_OK;
 	}
 

+ 2 - 2
arch/parisc/kernel/irq.c

@@ -379,14 +379,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 static struct irqaction timer_action = {
 	.handler = timer_interrupt,
 	.name = "timer",
-	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
+	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
 };
 
 #ifdef CONFIG_SMP
 static struct irqaction ipi_action = {
 	.handler = ipi_interrupt,
 	.name = "IPI",
-	.flags = IRQF_DISABLED | IRQF_PERCPU,
+	.flags = IRQF_PERCPU,
 };
 #endif
 

+ 290 - 45
arch/parisc/kernel/pacache.S

@@ -199,7 +199,6 @@ ENTRY(flush_instruction_cache_local)
 	.callinfo NO_CALLS
 	.entry
 
-	mtsp		%r0, %sr1
 	load32		cache_info, %r1
 
 	/* Flush Instruction Cache */
@@ -208,7 +207,8 @@ ENTRY(flush_instruction_cache_local)
 	LDREG		ICACHE_STRIDE(%r1), %arg1
 	LDREG		ICACHE_COUNT(%r1), %arg2
 	LDREG		ICACHE_LOOP(%r1), %arg3
-	rsm             PSW_SM_I, %r22		/* No mmgt ops during loop*/
+	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop*/
+	mtsp		%r0, %sr1
 	addib,COND(=)		-1, %arg3, fioneloop	/* Preadjust and test */
 	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */
 
@@ -220,7 +220,33 @@ fimanyloop:					/* Loop if LOOP >= 2 */
 	addib,COND(<=),n	-1, %arg2, fisync	/* Outer loop decr */
 
 fioneloop:					/* Loop if LOOP = 1 */
-	addib,COND(>)		-1, %arg2, fioneloop	/* Outer loop count decr */
+	/* Some implementations may flush with a single fice instruction */
+	cmpib,COND(>>=),n	15, %arg2, fioneloop2
+
+fioneloop1:
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	fice,m		%arg1(%sr1, %arg0)
+	addib,COND(>)	-16, %arg2, fioneloop1
+	fice,m		%arg1(%sr1, %arg0)
+
+	/* Check if done */
+	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */
+
+fioneloop2:
+	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
 	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */
 
 fisync:
@@ -240,8 +266,7 @@ ENTRY(flush_data_cache_local)
 	.callinfo NO_CALLS
 	.entry
 
-	mtsp		%r0, %sr1
-	load32 		cache_info, %r1
+	load32		cache_info, %r1
 
 	/* Flush Data Cache */
 
@@ -249,7 +274,8 @@ ENTRY(flush_data_cache_local)
 	LDREG		DCACHE_STRIDE(%r1), %arg1
 	LDREG		DCACHE_COUNT(%r1), %arg2
 	LDREG		DCACHE_LOOP(%r1), %arg3
-	rsm		PSW_SM_I, %r22
+	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop*/
+	mtsp		%r0, %sr1
 	addib,COND(=)		-1, %arg3, fdoneloop	/* Preadjust and test */
 	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */
 
@@ -261,7 +287,33 @@ fdmanyloop:					/* Loop if LOOP >= 2 */
 	addib,COND(<=),n	-1, %arg2, fdsync	/* Outer loop decr */
 
 fdoneloop:					/* Loop if LOOP = 1 */
-	addib,COND(>)		-1, %arg2, fdoneloop	/* Outer loop count decr */
+	/* Some implementations may flush with a single fdce instruction */
+	cmpib,COND(>>=),n	15, %arg2, fdoneloop2
+
+fdoneloop1:
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	fdce,m		%arg1(%sr1, %arg0)
+	addib,COND(>)	-16, %arg2, fdoneloop1
+	fdce,m		%arg1(%sr1, %arg0)
+
+	/* Check if done */
+	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */
+
+fdoneloop2:
+	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
 	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */
 
 fdsync:
@@ -277,7 +329,104 @@ ENDPROC(flush_data_cache_local)
 
 	.align	16
 
-ENTRY(copy_user_page_asm)
+/* Macros to serialize TLB purge operations on SMP.  */
+
+	.macro	tlb_lock	la,flags,tmp
+#ifdef CONFIG_SMP
+	ldil		L%pa_tlb_lock,%r1
+	ldo		R%pa_tlb_lock(%r1),\la
+	rsm		PSW_SM_I,\flags
+1:	LDCW		0(\la),\tmp
+	cmpib,<>,n	0,\tmp,3f
+2:	ldw		0(\la),\tmp
+	cmpb,<>		%r0,\tmp,1b
+	nop
+	b,n		2b
+3:
+#endif
+	.endm
+
+	.macro	tlb_unlock	la,flags,tmp
+#ifdef CONFIG_SMP
+	ldi		1,\tmp
+	stw		\tmp,0(\la)
+	mtsm		\flags
+#endif
+	.endm
+
+/* Clear page using kernel mapping.  */
+
+ENTRY(clear_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+#ifdef CONFIG_64BIT
+
+	/* Unroll the loop.  */
+	ldi		(PAGE_SIZE / 128), %r1
+
+1:
+	std		%r0, 0(%r26)
+	std		%r0, 8(%r26)
+	std		%r0, 16(%r26)
+	std		%r0, 24(%r26)
+	std		%r0, 32(%r26)
+	std		%r0, 40(%r26)
+	std		%r0, 48(%r26)
+	std		%r0, 56(%r26)
+	std		%r0, 64(%r26)
+	std		%r0, 72(%r26)
+	std		%r0, 80(%r26)
+	std		%r0, 88(%r26)
+	std		%r0, 96(%r26)
+	std		%r0, 104(%r26)
+	std		%r0, 112(%r26)
+	std		%r0, 120(%r26)
+
+	/* Note reverse branch hint for addib is taken.  */
+	addib,COND(>),n	-1, %r1, 1b
+	ldo		128(%r26), %r26
+
+#else
+
+	/*
+	 * Note that until (if) we start saving the full 64-bit register
+	 * values on interrupt, we can't use std on a 32 bit kernel.
+	 */
+	ldi		(PAGE_SIZE / 64), %r1
+
+1:
+	stw		%r0, 0(%r26)
+	stw		%r0, 4(%r26)
+	stw		%r0, 8(%r26)
+	stw		%r0, 12(%r26)
+	stw		%r0, 16(%r26)
+	stw		%r0, 20(%r26)
+	stw		%r0, 24(%r26)
+	stw		%r0, 28(%r26)
+	stw		%r0, 32(%r26)
+	stw		%r0, 36(%r26)
+	stw		%r0, 40(%r26)
+	stw		%r0, 44(%r26)
+	stw		%r0, 48(%r26)
+	stw		%r0, 52(%r26)
+	stw		%r0, 56(%r26)
+	stw		%r0, 60(%r26)
+
+	addib,COND(>),n	-1, %r1, 1b
+	ldo		64(%r26), %r26
+#endif
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC(clear_page_asm)
+
+/* Copy page using kernel mapping.  */
+
+ENTRY(copy_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -285,18 +434,14 @@ ENTRY(copy_user_page_asm)
 #ifdef CONFIG_64BIT
 	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
 	 * Unroll the loop by hand and arrange insn appropriately.
-	 * GCC probably can do this just as well.
+	 * Prefetch doesn't improve performance on rp3440.
+	 * GCC probably can do this just as well...
 	 */
 
-	ldd		0(%r25), %r19
 	ldi		(PAGE_SIZE / 128), %r1
 
-	ldw		64(%r25), %r0		/* prefetch 1 cacheline ahead */
-	ldw		128(%r25), %r0		/* prefetch 2 */
-
-1:	ldd		8(%r25), %r20
-	ldw		192(%r25), %r0		/* prefetch 3 */
-	ldw		256(%r25), %r0		/* prefetch 4 */
+1:	ldd		0(%r25), %r19
+	ldd		8(%r25), %r20
 
 	ldd		16(%r25), %r21
 	ldd		24(%r25), %r22
@@ -330,20 +475,16 @@ ENTRY(copy_user_page_asm)
 
 	ldd		112(%r25), %r21
 	ldd		120(%r25), %r22
+	ldo		128(%r25), %r25
 	std		%r19, 96(%r26)
 	std		%r20, 104(%r26)
 
-	ldo		128(%r25), %r25
 	std		%r21, 112(%r26)
 	std		%r22, 120(%r26)
-	ldo		128(%r26), %r26
 
-	/* conditional branches nullify on forward taken branch, and on
-	 * non-taken backward branch. Note that .+4 is a backwards branch.
-	 * The ldd should only get executed if the branch is taken.
-	 */
-	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
-	ldd		0(%r25), %r19		/* start next loads */
+	/* Note reverse branch hint for addib is taken.  */
+	addib,COND(>),n	-1, %r1, 1b
+	ldo		128(%r26), %r26
 
 #else
 
@@ -399,7 +540,7 @@ ENTRY(copy_user_page_asm)
 	.exit
 
 	.procend
-ENDPROC(copy_user_page_asm)
+ENDPROC(copy_page_asm)
 
 /*
  * NOTE: Code in clear_user_page has a hard coded dependency on the
@@ -422,8 +563,6 @@ ENDPROC(copy_user_page_asm)
  *          %r23 physical page (shifted for tlb insert) of "from" translation
  */
 
-#if 0
-
 	/*
 	 * We can't do this since copy_user_page is used to bring in
 	 * file data that might have instructions. Since the data would
@@ -435,6 +574,7 @@ ENDPROC(copy_user_page_asm)
 	 * use it if more information is passed into copy_user_page().
 	 * Have to do some measurements to see if it is worthwhile to
 	 * lobby for such a change.
+	 *
 	 */
 
 ENTRY(copy_user_page_asm)
@@ -442,16 +582,21 @@ ENTRY(copy_user_page_asm)
 	.callinfo NO_CALLS
 	.entry
 
+	/* Convert virtual `to' and `from' addresses to physical addresses.
+	   Move `from' physical address to non shadowed register.  */
 	ldil		L%(__PAGE_OFFSET), %r1
 	sub		%r26, %r1, %r26
-	sub		%r25, %r1, %r23		/* move physical addr into non shadowed reg */
+	sub		%r25, %r1, %r23
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
 	/* FIXME for different page sizes != 4k */
 #ifdef CONFIG_64BIT
-	extrd,u		%r26,56,32, %r26		/* convert phys addr to tlb insert format */
-	extrd,u		%r23,56,32, %r23		/* convert phys addr to tlb insert format */
-	depd		%r24,63,22, %r28		/* Form aliased virtual address 'to' */
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+#endif
+	extrd,u		%r26,56,32, %r26	/* convert phys addr to tlb insert format */
+	extrd,u		%r23,56,32, %r23	/* convert phys addr to tlb insert format */
+	depd		%r24,63,22, %r28	/* Form aliased virtual address 'to' */
 	depdi		0, 63,12, %r28		/* Clear any offset bits */
 	copy		%r28, %r29
 	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
@@ -466,10 +611,76 @@ ENTRY(copy_user_page_asm)
 
 	/* Purge any old translations */
 
+#ifdef CONFIG_PA20
+	pdtlb,l		0(%r28)
+	pdtlb,l		0(%r29)
+#else
+	tlb_lock	%r20,%r21,%r22
 	pdtlb		0(%r28)
 	pdtlb		0(%r29)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
+#ifdef CONFIG_64BIT
+	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
+	 * Unroll the loop by hand and arrange insn appropriately.
+	 * GCC probably can do this just as well.
+	 */
 
-	ldi		64, %r1
+	ldd		0(%r29), %r19
+	ldi		(PAGE_SIZE / 128), %r1
+
+1:	ldd		8(%r29), %r20
+
+	ldd		16(%r29), %r21
+	ldd		24(%r29), %r22
+	std		%r19, 0(%r28)
+	std		%r20, 8(%r28)
+
+	ldd		32(%r29), %r19
+	ldd		40(%r29), %r20
+	std		%r21, 16(%r28)
+	std		%r22, 24(%r28)
+
+	ldd		48(%r29), %r21
+	ldd		56(%r29), %r22
+	std		%r19, 32(%r28)
+	std		%r20, 40(%r28)
+
+	ldd		64(%r29), %r19
+	ldd		72(%r29), %r20
+	std		%r21, 48(%r28)
+	std		%r22, 56(%r28)
+
+	ldd		80(%r29), %r21
+	ldd		88(%r29), %r22
+	std		%r19, 64(%r28)
+	std		%r20, 72(%r28)
+
+	ldd		 96(%r29), %r19
+	ldd		104(%r29), %r20
+	std		%r21, 80(%r28)
+	std		%r22, 88(%r28)
+
+	ldd		112(%r29), %r21
+	ldd		120(%r29), %r22
+	std		%r19, 96(%r28)
+	std		%r20, 104(%r28)
+
+	ldo		128(%r29), %r29
+	std		%r21, 112(%r28)
+	std		%r22, 120(%r28)
+	ldo		128(%r28), %r28
+
+	/* conditional branches nullify on forward taken branch, and on
+	 * non-taken backward branch. Note that .+4 is a backwards branch.
+	 * The ldd should only get executed if the branch is taken.
+	 */
+	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
+	ldd		0(%r29), %r19		/* start next loads */
+
+#else
+	ldi		(PAGE_SIZE / 64), %r1
 
 	/*
 	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
@@ -480,9 +691,7 @@ ENTRY(copy_user_page_asm)
 	 * use ldd/std on a 32 bit kernel.
 	 */
 
-
-1:
-	ldw		0(%r29), %r19
+1:	ldw		0(%r29), %r19
 	ldw		4(%r29), %r20
 	ldw		8(%r29), %r21
 	ldw		12(%r29), %r22
@@ -515,8 +724,10 @@ ENTRY(copy_user_page_asm)
 	stw		%r21, 56(%r28)
 	stw		%r22, 60(%r28)
 	ldo		64(%r28), %r28
+
 	addib,COND(>)		-1, %r1,1b
 	ldo		64(%r29), %r29
+#endif
 
 	bv		%r0(%r2)
 	nop
@@ -524,9 +735,8 @@ ENTRY(copy_user_page_asm)
 
 	.procend
 ENDPROC(copy_user_page_asm)
-#endif
 
-ENTRY(__clear_user_page_asm)
+ENTRY(clear_user_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -550,7 +760,13 @@ ENTRY(__clear_user_page_asm)
 
 	/* Purge any old translation */
 
+#ifdef CONFIG_PA20
+	pdtlb,l		0(%r28)
+#else
+	tlb_lock	%r20,%r21,%r22
 	pdtlb		0(%r28)
+	tlb_unlock	%r20,%r21,%r22
+#endif
 
 #ifdef CONFIG_64BIT
 	ldi		(PAGE_SIZE / 128), %r1
@@ -580,8 +796,7 @@ ENTRY(__clear_user_page_asm)
 #else	/* ! CONFIG_64BIT */
 	ldi		(PAGE_SIZE / 64), %r1
 
-1:
-	stw		%r0, 0(%r28)
+1:	stw		%r0, 0(%r28)
 	stw		%r0, 4(%r28)
 	stw		%r0, 8(%r28)
 	stw		%r0, 12(%r28)
@@ -606,7 +821,7 @@ ENTRY(__clear_user_page_asm)
 	.exit
 
 	.procend
-ENDPROC(__clear_user_page_asm)
+ENDPROC(clear_user_page_asm)
 
 ENTRY(flush_dcache_page_asm)
 ENTRY(flush_dcache_page_asm)
 	.proc
 	.proc
 
 	/* Purge any old translation */
 
 
+#ifdef CONFIG_PA20
+	pdtlb,l		0(%r28)
+#else
+	tlb_lock	%r20,%r21,%r22
 	pdtlb		0(%r28)
 	pdtlb		0(%r28)
+	tlb_unlock	%r20,%r21,%r22
 
 	ldil		L%dcache_stride, %r1
 	ldw		R%dcache_stride(%r1), %r1
 	ldw		R%dcache_stride(%r1), %r1
@@ -663,8 +884,17 @@ ENTRY(flush_dcache_page_asm)
 	fdc,m		%r1(%r28)
 	fdc,m		%r1(%r28)
 
 
 	sync
 	sync
+
+#ifdef CONFIG_PA20
+	pdtlb,l		0(%r25)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		0(%r25)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
 	bv		%r0(%r2)
 	bv		%r0(%r2)
-	pdtlb		(%r25)
+	nop
 	.exit
 	.exit
 
 
 	.procend
 
 	/* Purge any old translation */
 
 
 
-	pitlb		(%sr4,%r28)
+#ifdef CONFIG_PA20
+	pitlb,l         %r0(%sr4,%r28)
+#else
+	tlb_lock        %r20,%r21,%r22
+	pitlb           (%sr4,%r28)
+	tlb_unlock      %r20,%r21,%r22
+#endif
 
 
 	ldil		L%icache_stride, %r1
 	ldil		L%icache_stride, %r1
 	ldw		R%icache_stride(%r1), %r1
 	ldw		R%icache_stride(%r1), %r1
@@ -727,8 +963,17 @@ ENTRY(flush_icache_page_asm)
 	fic,m		%r1(%sr4,%r28)
 	fic,m		%r1(%sr4,%r28)
 
 
 	sync
 	sync
+
+#ifdef CONFIG_PA20
+	pitlb,l         %r0(%sr4,%r25)
+#else
+	tlb_lock        %r20,%r21,%r22
+	pitlb           (%sr4,%r25)
+	tlb_unlock      %r20,%r21,%r22
+#endif
+
 	bv		%r0(%r2)
 	bv		%r0(%r2)
-	pitlb		(%sr4,%r25)
+	nop
 	.exit
 	.exit
 
 
 	.procend
 	.procend
@@ -777,7 +1022,7 @@ ENTRY(flush_kernel_dcache_page_asm)
 	.procend
 	.procend
 ENDPROC(flush_kernel_dcache_page_asm)
 ENDPROC(flush_kernel_dcache_page_asm)
 
 
-ENTRY(purge_kernel_dcache_page)
+ENTRY(purge_kernel_dcache_page_asm)
 	.proc
 	.proc
 	.callinfo NO_CALLS
 	.callinfo NO_CALLS
 	.entry
 	.entry
@@ -817,7 +1062,7 @@ ENTRY(purge_kernel_dcache_page)
 	.exit
 	.exit
 
 
 
 ENTRY(flush_user_dcache_range_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry

+ 3 - 2
arch/parisc/kernel/parisc_ksyms.c

@@ -157,5 +157,6 @@ extern void _mcount(void);
 EXPORT_SYMBOL(_mcount);
 #endif
 
-/* from pacache.S -- needed for copy_page */
-EXPORT_SYMBOL(copy_user_page_asm);
+/* from pacache.S -- needed for clear/copy_page */
+EXPORT_SYMBOL(clear_page_asm);
+EXPORT_SYMBOL(copy_page_asm);

+ 1 - 1
arch/parisc/kernel/signal.c

@@ -312,7 +312,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 #if DEBUG_SIG
 	/* Assert that we're flushing in the correct space... */
 	{
-		int sid;
+		unsigned long sid;
 		asm ("mfsp %%sr3,%0" : "=r" (sid));
 		DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
 		       sid, frame->tramp);

+ 11 - 4
arch/parisc/kernel/signal32.c

@@ -65,7 +65,7 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
 {
 	compat_sigset_t s;
 
-	if (sz != sizeof *set)
+	if (sz != sizeof(compat_sigset_t))
 		return -EINVAL;
 	sigset_64to32(&s, set);
 
@@ -78,7 +78,7 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
 	compat_sigset_t s;
 	int r;
 
-	if (sz != sizeof *set)
+	if (sz != sizeof(compat_sigset_t))
 		return -EINVAL;
 
 	if ((r = copy_from_user(&s, up, sz)) == 0) {
@@ -94,8 +94,11 @@ int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t _
 	sigset_t old_set, new_set;
 	int ret;
 
-	if (set && get_sigset32(set, &new_set, sigsetsize))
-		return -EFAULT;
+	if (set) {
+		ret = get_sigset32(set, &new_set, sigsetsize);
+		if (ret)
+			return ret;
+	}
 
 	KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how, set ? (sigset_t __user *)&new_set : NULL,
 				 oset ? (sigset_t __user *)&old_set : NULL, sigsetsize);
@@ -128,6 +131,10 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigacti
 	struct k_sigaction new_sa, old_sa;
 	int ret = -EINVAL;
 
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(compat_sigset_t))
+		return -EINVAL;
+
 	if (act) {
 		if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa))
 			return -EFAULT;

+ 12 - 4
arch/parisc/kernel/sys_parisc.c

@@ -94,11 +94,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	if (len > TASK_SIZE)
 		return -ENOMEM;
-	/* Might want to check for cache aliasing issues for MAP_FIXED case
-	 * like ARM or MIPS ??? --BenH.
-	 */
-	if (flags & MAP_FIXED)
+	if (flags & MAP_FIXED) {
+		if ((flags & MAP_SHARED) &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+			return -EINVAL;
 		return addr;
+	}
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
@@ -212,6 +213,13 @@ asmlinkage long parisc_sync_file_range(int fd,
 			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
 }
 
+asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+				u32 lenhi, u32 lenlo)
+{
+        return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
+                             ((u64)lenhi << 32) | lenlo);
+}
+
 asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
 asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
 {
 	return -ENOMEM;

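The parisc_fallocate() wrapper added above shows the usual way a 64-bit value crosses a 32-bit syscall ABI: the caller splits it into two 32-bit registers and the kernel joins them again. A stand-alone C sketch of the same reassembly (the helper name is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Same bit arithmetic as parisc_fallocate(); the helper name is made up.
     * The cast must happen before the shift: shifting a 32-bit value left
     * by 32 is undefined behaviour in C. */
    static uint64_t merge_hi_lo(uint32_t hi, uint32_t lo)
    {
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        /* A 6 GiB offset split across two registers. */
        printf("%#llx\n", (unsigned long long)merge_hi_lo(0x1u, 0x80000000u)); /* 0x180000000 */
        return 0;
    }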
+ 13 - 109
arch/parisc/kernel/sys_parisc32.c

@@ -21,7 +21,6 @@
 #include <linux/time.h>
 #include <linux/smp.h>
 #include <linux/sem.h>
-#include <linux/msg.h>
 #include <linux/shm.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
@@ -61,111 +60,23 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
     return -ENOSYS;
 }
 
-asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
-	struct compat_timespec __user *interval)
-{
-	struct timespec t;
-	int ret;
-
-	KERNEL_SYSCALL(ret, sys_sched_rr_get_interval, pid, (struct timespec __user *)&t);
-	if (put_compat_timespec(&t, interval))
-		return -EFAULT;
-	return ret;
-}
-
-struct msgbuf32 {
-    int mtype;
-    char mtext[1];
-};
-
-asmlinkage long sys32_msgsnd(int msqid,
-				struct msgbuf32 __user *umsgp32,
-				size_t msgsz, int msgflg)
-{
-	struct msgbuf *mb;
-	struct msgbuf32 mb32;
-	int err;
-
-	if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
-		return -ENOMEM;
-
-	err = get_user(mb32.mtype, &umsgp32->mtype);
-	mb->mtype = mb32.mtype;
-	err |= copy_from_user(mb->mtext, &umsgp32->mtext, msgsz);
-
-	if (err)
-		err = -EFAULT;
-	else
-		KERNEL_SYSCALL(err, sys_msgsnd, msqid, (struct msgbuf __user *)mb, msgsz, msgflg);
-
-	kfree(mb);
-	return err;
-}
-
-asmlinkage long sys32_msgrcv(int msqid,
-				struct msgbuf32 __user *umsgp32,
-				size_t msgsz, long msgtyp, int msgflg)
-{
-	struct msgbuf *mb;
-	struct msgbuf32 mb32;
-	int err, len;
-
-	if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
-		return -ENOMEM;
-
-	KERNEL_SYSCALL(err, sys_msgrcv, msqid, (struct msgbuf __user *)mb, msgsz, msgtyp, msgflg);
-
-	if (err >= 0) {
-		len = err;
-		mb32.mtype = mb->mtype;
-		err = put_user(mb32.mtype, &umsgp32->mtype);
-		err |= copy_to_user(&umsgp32->mtext, mb->mtext, len);
-		if (err)
-			err = -EFAULT;
-		else
-			err = len;
-	}
-
-	kfree(mb);
-	return err;
-}
-
-asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
+/* Note: it is necessary to treat out_fd and in_fd as unsigned ints, with the
+ * corresponding cast to a signed int to insure that the proper conversion
+ * (sign extension) between the register representation of a signed int (msr in
+ * 32-bit mode) and the register representation of a signed int (msr in 64-bit
+ * mode) is performed.
+ */
+asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd,
+			       compat_off_t __user *offset, compat_size_t count)
 {
-        mm_segment_t old_fs = get_fs();
-        int ret;
-        off_t of;
-
-        if (offset && get_user(of, offset))
-                return -EFAULT;
-
-        set_fs(KERNEL_DS);
-        ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
-        set_fs(old_fs);
-
-        if (offset && put_user(of, offset))
-                return -EFAULT;
-
-        return ret;
+	return compat_sys_sendfile((int)out_fd, (int)in_fd, offset, count);
 }
 
-asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
+asmlinkage long sys32_sendfile64(u32 out_fd, u32 in_fd,
+				 compat_loff_t __user *offset, compat_size_t count)
 {
-	mm_segment_t old_fs = get_fs();
-	int ret;
-	loff_t lof;
-	
-	if (offset && get_user(lof, offset))
-		return -EFAULT;
-		
-	set_fs(KERNEL_DS);
-	ret = sys_sendfile64(out_fd, in_fd, offset ? (loff_t __user *)&lof : NULL, count);
-	set_fs(old_fs);
-	
-	if (offset && put_user(lof, offset))
-		return -EFAULT;
-		
-	return ret;
+	return sys_sendfile64((int)out_fd, (int)in_fd,
+				(loff_t __user *)offset, count);
 }
 }
 
 
@@ -200,13 +111,6 @@ long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
 				  buf, len);
 }
 
-asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
-				u32 lenhi, u32 lenlo)
-{
-        return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
-                             ((loff_t)lenhi << 32) | lenlo);
-}
-
 asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
 					 u32 mask_lo, int fd,
 					 const char __user *pathname)

+ 4 - 1
arch/parisc/kernel/syscall.S

@@ -309,10 +309,13 @@ tracesys_next:
 	LDREG   TASK_PT_GR25(%r1), %r25
 	LDREG   TASK_PT_GR24(%r1), %r24
 	LDREG   TASK_PT_GR23(%r1), %r23
-#ifdef CONFIG_64BIT
 	LDREG   TASK_PT_GR22(%r1), %r22
 	LDREG   TASK_PT_GR21(%r1), %r21
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
+#else
+	stw     %r22, -52(%r30)                 /* 5th argument */
+	stw     %r21, -56(%r30)                 /* 6th argument */
 #endif
 
 	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0

+ 14 - 13
arch/parisc/kernel/syscall_table.S

@@ -247,10 +247,7 @@
 	ENTRY_SAME(sched_yield)
 	ENTRY_SAME(sched_get_priority_max)
 	ENTRY_SAME(sched_get_priority_min)	/* 160 */
-	/* These 2 would've worked if someone had defined struct timespec
-	 * carefully, like timeval for example (which is about the same).
-	 * Unfortunately it contains a long :-( */
-	ENTRY_DIFF(sched_rr_get_interval)
+	ENTRY_COMP(sched_rr_get_interval)
 	ENTRY_COMP(nanosleep)
 	ENTRY_SAME(mremap)
 	ENTRY_SAME(setresuid)
@@ -286,8 +283,8 @@
 	ENTRY_SAME(semop)		/* 185 */
 	ENTRY_SAME(semget)
 	ENTRY_DIFF(semctl)
-	ENTRY_DIFF(msgsnd)
-	ENTRY_DIFF(msgrcv)
+	ENTRY_COMP(msgsnd)
+	ENTRY_COMP(msgrcv)
 	ENTRY_SAME(msgget)		/* 190 */
 	ENTRY_SAME(msgctl)
 	ENTRY_SAME(shmat)
@@ -307,7 +304,7 @@
 	ENTRY_SAME(gettid)
 	ENTRY_OURS(readahead)
 	ENTRY_SAME(tkill)
-	ENTRY_SAME(sendfile64)
+	ENTRY_DIFF(sendfile64)
 	ENTRY_COMP(futex)		/* 210 */
 	ENTRY_COMP(sched_setaffinity)
 	ENTRY_COMP(sched_getaffinity)
@@ -327,12 +324,12 @@
 	ENTRY_SAME(epoll_wait)
 	ENTRY_SAME(remap_file_pages)
 	ENTRY_SAME(semtimedop)
-	ENTRY_SAME(mq_open)
+	ENTRY_COMP(mq_open)
 	ENTRY_SAME(mq_unlink)		/* 230 */
-	ENTRY_SAME(mq_timedsend)
-	ENTRY_SAME(mq_timedreceive)
-	ENTRY_SAME(mq_notify)
-	ENTRY_SAME(mq_getsetattr)
+	ENTRY_COMP(mq_timedsend)
+	ENTRY_COMP(mq_timedreceive)
+	ENTRY_COMP(mq_notify)
+	ENTRY_COMP(mq_getsetattr)
 	ENTRY_COMP(waitid)		/* 235 */
 	ENTRY_OURS(fadvise64_64)
 	ENTRY_SAME(set_tid_address)
@@ -403,7 +400,7 @@
 	ENTRY_COMP(signalfd)
 	ENTRY_SAME(ni_syscall)		/* was timerfd */
 	ENTRY_SAME(eventfd)
-	ENTRY_COMP(fallocate)		/* 305 */
+	ENTRY_OURS(fallocate)		/* 305 */
 	ENTRY_SAME(timerfd_create)
 	ENTRY_COMP(timerfd_settime)
 	ENTRY_COMP(timerfd_gettime)
@@ -428,6 +425,10 @@
 	ENTRY_SAME(syncfs)
 	ENTRY_SAME(setns)
 	ENTRY_COMP(sendmmsg)
+	ENTRY_COMP(process_vm_readv)	/* 330 */
+	ENTRY_COMP(process_vm_writev)
+	ENTRY_SAME(kcmp)
+	ENTRY_SAME(finit_module)

 	/* Nothing yet */

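For readers unfamiliar with this table: on a 64-bit kernel each syscall slot
carries both a wide and a narrow entry point, and the ENTRY_* macros select
the 32-bit variant. Roughly (a sketch based on the file's own header comment;
the exact macro bodies here are assumptions from memory):

	/* ENTRY_SAME: one implementation serves wide and narrow userspace */
	#define ENTRY_SAME(name)	.dword sys_##name
	/* ENTRY_DIFF: a hand-written parisc sys32_* wrapper is required */
	#define ENTRY_DIFF(name)	.dword sys32_##name
	/* ENTRY_COMP: the generic compat layer has a usable implementation */
	#define ENTRY_COMP(name)	.dword compat_sys_##name
	/* ENTRY_OURS: parisc needs its own wrapper even for wide userspace */
	#define ENTRY_OURS(name)	.dword parisc_##name

Seen through that lens, the churn above is mostly ENTRY_SAME/ENTRY_DIFF
entries being retired in favour of ENTRY_COMP now that generic compat
implementations exist.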

+ 25 - 5
arch/parisc/mm/fault.c

@@ -175,10 +175,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	struct mm_struct *mm = tsk->mm;
 	unsigned long acc_type;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

 	if (in_atomic() || !mm)
 		goto no_context;

+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma_prev(mm, address, &prev_vma);
 	if (!vma || address < vma->vm_start)
@@ -201,7 +203,12 @@ good_area:
 	 * fault.
 	 */

-	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address,
+			flags | ((acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0));
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some
@@ -214,10 +221,23 @@ good_area:
 			goto bad_area;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 	up_read(&mm->mmap_sem);
 	return;
 
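Because this port is spread over three hunks, here is the whole retry flow in
one place (a condensed restatement of the diff above, with unrelated lines
elided, not new code):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
retry:
	down_read(&mm->mmap_sem);
	/* ... find and validate the vma ... */
	fault = handle_mm_fault(mm, vma, address, flags);
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;	/* task was killed while waiting on the page */
	/* ... VM_FAULT_ERROR handling ... */
	if (fault & VM_FAULT_RETRY) {
		flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* retry at most once */
		/* mmap_sem was already dropped in __lock_page_or_retry() */
		goto retry;
	}
	up_read(&mm->mmap_sem);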

+ 1 - 0
drivers/parisc/Kconfig

@@ -128,6 +128,7 @@ config SUPERIO
 config CHASSIS_LCD_LED
 	bool "Chassis LCD and LED support"
 	default y
+	select VM_EVENT_COUNTERS
 	help
 	  Say Y here if you want to enable support for the Heartbeat,
 	  Disk/Network activities LEDs on some PA-RISC machines,

+ 5 - 8
drivers/parisc/dino.c

@@ -580,15 +580,13 @@ dino_fixup_bus(struct pci_bus *bus)

 			}

-			DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n",
+			DBG("DEBUG %s assigning %d [%pR]\n",
 			    dev_name(&bus->self->dev), i,
-			    bus->self->resource[i].start,
-			    bus->self->resource[i].end);
+			    &bus->self->resource[i]);
 			WARN_ON(pci_assign_resource(bus->self, i));
-			DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n",
+			DBG("DEBUG %s after assign %d [%pR]\n",
 			    dev_name(&bus->self->dev), i,
-			    bus->self->resource[i].start,
-			    bus->self->resource[i].end);
+			    &bus->self->resource[i]);
 		}
 	}

@@ -772,8 +770,7 @@ dino_bridge_init(struct dino_device *dino_dev, const char *name)
 		result = ccio_request_resource(dino_dev->hba.dev, &res[i]);
 		if (result < 0) {
 			printk(KERN_ERR "%s: failed to claim PCI Bus address "
-			       "space %d (0x%lx-0x%lx)!\n", name, i,
-			       (unsigned long)res[i].start, (unsigned long)res[i].end);
+			       "space %d (%pR)!\n", name, i, &res[i]);
 			return result;
 		}
 	}

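The dino.c changes here, and the hppb.c change below, both switch to the %pR
printk extension, which formats a struct resource (type, range, flags) in one
conversion instead of hand-casting start and end. A minimal usage sketch
(the resource values are made up for illustration):

	struct resource r = {
		.start	= 0xf0000000,
		.end	= 0xf0ffffff,
		.flags	= IORESOURCE_MEM,
	};
	/* prints something like "claiming [mem 0xf0000000-0xf0ffffff]" */
	printk(KERN_DEBUG "claiming %pR\n", &r);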
+ 2 - 4
drivers/parisc/hppb.c

@@ -74,10 +74,8 @@ static int hppb_probe(struct parisc_device *dev)

 	status = ccio_request_resource(dev, &card->mmio_region);
 	if(status < 0) {
-		printk(KERN_ERR "%s: failed to claim HP-PB "
-			"bus space (0x%08llx, 0x%08llx)\n",
-			__FILE__, (unsigned long long) card->mmio_region.start,
-			(unsigned long long) card->mmio_region.end);
+		printk(KERN_ERR "%s: failed to claim HP-PB bus space (%pR)\n",
+			__FILE__, &card->mmio_region);
 	}

         return 0;

+ 2 - 4
drivers/parisc/pdc_stable.c

@@ -212,12 +212,10 @@ pdcspath_store(struct pdcspath_entry *entry)
 			entry, devpath, entry->addr);

 	/* addr, devpath and count must be word aligned */
-	if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) {
-		printk(KERN_ERR "%s: an error occurred when writing to PDC.\n"
+	if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK)
+		WARN(1, KERN_ERR "%s: an error occurred when writing to PDC.\n"
 				"It is likely that the Stable Storage data has been corrupted.\n"
 				"It is likely that the Stable Storage data has been corrupted.\n"
 				"Please check it carefully upon next reboot.\n", __func__);
 				"Please check it carefully upon next reboot.\n", __func__);
-		WARN_ON(1);
-	}

 	/* kobject is already registered */
 	entry->ready = 2;

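The pdc_stable.c change collapses a printk-then-WARN_ON(1) pair into a single
WARN(), which tests the condition, prints the formatted message, and dumps a
backtrace in one call, letting the braces around the two-statement form go
away. The generic shape, with do_write() as a hypothetical stand-in:

	ret = do_write();
	WARN(ret != 0, "write failed with %d, data may be corrupt\n", ret);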
+ 1 - 1
drivers/parisc/superio.c

@@ -274,7 +274,7 @@ superio_init(struct pci_dev *pcidev)
 	else
 		printk(KERN_ERR PFX "USB regulator not initialized!\n");

-	if (request_irq(pdev->irq, superio_interrupt, IRQF_DISABLED,
+	if (request_irq(pdev->irq, superio_interrupt, 0,
 			SUPERIO, (void *)sio)) {

 		printk(KERN_ERR PFX "could not get irq\n");