Browse Source

Merge tag 'xtensa-for-next-20160111' of git://github.com/jcmvbkbc/linux-xtensa

Xtensa improvements for 4.5:

- control whether perf IRQ is treated as NMI from Kconfig;
- implement ioremap for regions outside KIO segment.
Chris Zankel 9 years ago
parent
commit
bb2f348604

+ 16 - 0
arch/xtensa/Kconfig

@@ -139,6 +139,22 @@ config XTENSA_VARIANT_HAVE_PERF_EVENTS
 
 
 	  If unsure, say N.
 
+config XTENSA_FAKE_NMI
+	bool "Treat PMM IRQ as NMI"
+	depends on XTENSA_VARIANT_HAVE_PERF_EVENTS
+	default n
+	help
+	  If PMM IRQ is the only IRQ at EXCM level it is safe to
+	  treat it as NMI, which improves accuracy of profiling.
+
+	  If there are other interrupts at or above PMM IRQ priority level
+	  but not above the EXCM level, PMM IRQ still may be treated as NMI,
+	  but only if these IRQs are not used. There will be a build warning
+	  saying that this is not safe, and a bugcheck if one of these IRQs
+	  actually fire.
+
+	  If unsure, say N.
+
 config XTENSA_UNALIGNED_USER
 	bool "Unaligned memory access in use space"
 	help

+ 13 - 3
arch/xtensa/include/asm/io.h

@@ -25,9 +25,12 @@
 
 
 #ifdef CONFIG_MMU
 
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
+void xtensa_iounmap(volatile void __iomem *addr);
+
 /*
  * Return the virtual address for the specified bus memory.
- * Note that we currently don't support any address outside the KIO segment.
  */
 static inline void __iomem *ioremap_nocache(unsigned long offset,
 		unsigned long size)
@@ -36,7 +39,7 @@ static inline void __iomem *ioremap_nocache(unsigned long offset,
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
 	else
-		BUG();
+		return xtensa_ioremap_nocache(offset, size);
 }
 
 static inline void __iomem *ioremap_cache(unsigned long offset,
@@ -46,7 +49,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
 	else
-		BUG();
+		return xtensa_ioremap_cache(offset, size);
 }
 #define ioremap_cache ioremap_cache
 
@@ -60,6 +63,13 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 
 
 static inline void iounmap(volatile void __iomem *addr)
 {
+	unsigned long va = (unsigned long) addr;
+
+	if (!(va >= XCHAL_KIO_CACHED_VADDR &&
+	      va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
+	    !(va >= XCHAL_KIO_BYPASS_VADDR &&
+	      va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+		xtensa_iounmap(addr);
 }
 
 #define virt_to_bus     virt_to_phys

+ 5 - 7
arch/xtensa/include/asm/processor.h

@@ -78,22 +78,20 @@
 #define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
 #define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
 
-#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
+#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)
 
 
 #define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
 
 /* LOCKLEVEL defines the interrupt level that masks all
  * general-purpose interrupts.
  */
-#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
-	defined(XCHAL_PROFILING_INTERRUPT) && \
-	PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
-	XCHAL_EXCM_LEVEL > 1 && \
-	IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
-#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
+#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
+#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
 #else
 #define LOCKLEVEL XCHAL_EXCM_LEVEL
 #endif
+
 #define TOPLEVEL XCHAL_EXCM_LEVEL
 #define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
 

+ 3 - 6
arch/xtensa/include/asm/timex.h

@@ -12,19 +12,16 @@
 #include <asm/processor.h>
 #include <linux/stringify.h>
 
-#define _INTLEVEL(x)	XCHAL_INT ## x ## _LEVEL
-#define INTLEVEL(x)	_INTLEVEL(x)
-
 #if XCHAL_NUM_TIMERS > 0 && \
-	INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+	XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     0
 # define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
 #elif XCHAL_NUM_TIMERS > 1 && \
-	INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+	XTENSA_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     1
 # define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
 #elif XCHAL_NUM_TIMERS > 2 && \
-	INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+	XTENSA_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     2
 # define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
 #else

+ 27 - 0
arch/xtensa/kernel/traps.c

@@ -205,6 +205,32 @@ extern void do_IRQ(int, struct pt_regs *);
 
 
 #if XTENSA_FAKE_NMI
 
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
+#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
+#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire."
+
+static inline void check_valid_nmi(void)
+{
+	unsigned intread = get_sr(interrupt);
+	unsigned intenable = get_sr(intenable);
+
+	BUG_ON(intread & intenable &
+	       ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
+		 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
+		 BIT(XCHAL_PROFILING_INTERRUPT)));
+}
+
+#else
+
+static inline void check_valid_nmi(void)
+{
+}
+
+#endif
+
 irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
 
 DEFINE_PER_CPU(unsigned long, nmi_count);
@@ -219,6 +245,7 @@ void do_nmi(struct pt_regs *regs)
 	old_regs = set_irq_regs(regs);
 	nmi_enter();
 	++*this_cpu_ptr(&nmi_count);
+	check_valid_nmi();
 	xtensa_pmu_irq_handler(0, NULL);
 	nmi_exit();
 	set_irq_regs(old_regs);

+ 1 - 1
arch/xtensa/mm/Makefile

@@ -3,5 +3,5 @@
 #
 
 obj-y			:= init.o misc.o
-obj-$(CONFIG_MMU)	+= cache.o fault.o mmu.o tlb.o
+obj-$(CONFIG_MMU)	+= cache.o fault.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM)	+= highmem.o

+ 68 - 0
arch/xtensa/mm/ioremap.c

@@ -0,0 +1,68 @@
+/*
+ * ioremap implementation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
+				    pgprot_t prot)
+{
+	unsigned long offset = paddr & ~PAGE_MASK;
+	unsigned long pfn = __phys_to_pfn(paddr);
+	struct vm_struct *area;
+	unsigned long vaddr;
+	int err;
+
+	paddr &= PAGE_MASK;
+
+	WARN_ON(pfn_valid(pfn));
+
+	size = PAGE_ALIGN(offset + size);
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area)
+		return NULL;
+
+	vaddr = (unsigned long)area->addr;
+	area->phys_addr = paddr;
+
+	err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
+
+	if (err) {
+		vunmap((void *)vaddr);
+		return NULL;
+	}
+
+	flush_cache_vmap(vaddr, vaddr + size);
+	return (void __iomem *)(offset + vaddr);
+}
+
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
+{
+	return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
+}
+EXPORT_SYMBOL(xtensa_ioremap_nocache);
+
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
+{
+	return xtensa_ioremap(addr, size, PAGE_KERNEL);
+}
+EXPORT_SYMBOL(xtensa_ioremap_cache);
+
+void xtensa_iounmap(volatile void __iomem *io_addr)
+{
+	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+
+	vunmap(addr);
+}
+EXPORT_SYMBOL(xtensa_iounmap);