|
@@ -51,6 +51,7 @@
 #include <acpi/actbl1.h>
 #include <acpi/ghes.h>
 #include <acpi/apei.h>
+#include <asm/fixmap.h>
 #include <asm/tlbflush.h>
 #include <ras/ras_event.h>
|
@@ -112,7 +113,7 @@ static DEFINE_MUTEX(ghes_list_mutex);
  * Because the memory area used to transfer hardware error information
  * from BIOS to Linux can be determined only in NMI, IRQ or timer
  * handler, but general ioremap can not be used in atomic context, so
- * a special version of atomic ioremap is implemented for that.
+ * the fixmap is used instead.
  */
 
 /*
|
@@ -126,8 +127,8 @@ static DEFINE_MUTEX(ghes_list_mutex);
 /* virtual memory area for atomic ioremap */
 static struct vm_struct *ghes_ioremap_area;
 /*
- * These 2 spinlock is used to prevent atomic ioremap virtual memory
- * area from being mapped simultaneously.
+ * These 2 spinlocks are used to prevent the fixmap entries from being used
+ * simultaneously.
  */
 static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
 static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
|
@@ -159,53 +160,36 @@ static void ghes_ioremap_exit(void)
 
 static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
 {
-	unsigned long vaddr;
 	phys_addr_t paddr;
 	pgprot_t prot;
 
-	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
-
 	paddr = pfn << PAGE_SHIFT;
 	prot = arch_apei_get_mem_attribute(paddr);
-	ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
+	__set_fixmap(FIX_APEI_GHES_NMI, paddr, prot);
 
-	return (void __iomem *)vaddr;
+	return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI);
 }
 
 static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
 {
-	unsigned long vaddr;
 	phys_addr_t paddr;
 	pgprot_t prot;
 
-	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
-
 	paddr = pfn << PAGE_SHIFT;
 	prot = arch_apei_get_mem_attribute(paddr);
+	__set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot);
 
-	ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
-
-	return (void __iomem *)vaddr;
+	return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ);
 }
 
-static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
+static void ghes_iounmap_nmi(void)
 {
-	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
-	void *base = ghes_ioremap_area->addr;
-
-	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
-	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
-	arch_apei_flush_tlb_one(vaddr);
+	clear_fixmap(FIX_APEI_GHES_NMI);
 }
 
-static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
+static void ghes_iounmap_irq(void)
 {
-	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
-	void *base = ghes_ioremap_area->addr;
-
-	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
-	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
-	arch_apei_flush_tlb_one(vaddr);
+	clear_fixmap(FIX_APEI_GHES_IRQ);
 }
 
 static int ghes_estatus_pool_init(void)
|
|
@@ -361,10 +345,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
 		paddr += trunk;
 		buffer += trunk;
 		if (in_nmi) {
-			ghes_iounmap_nmi(vaddr);
+			ghes_iounmap_nmi();
 			raw_spin_unlock(&ghes_ioremap_lock_nmi);
 		} else {
-			ghes_iounmap_irq(vaddr);
+			ghes_iounmap_irq();
 			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
 		}
 	}
 }