@@ -149,25 +149,28 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
-#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+#define MAX_TLBI_OPS	1024UL
 
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
-				     bool last_level)
+				     unsigned long stride, bool last_level)
 {
 	unsigned long asid = ASID(vma->vm_mm);
 	unsigned long addr;
 
-	if ((end - start) > MAX_TLB_RANGE) {
+	if ((end - start) > (MAX_TLBI_OPS * stride)) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
 
+	/* Convert the stride into units of 4k */
+	stride >>= 12;
+
 	start = __TLBI_VADDR(start, asid);
 	end = __TLBI_VADDR(end, asid);
 
 	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+	for (addr = start; addr < end; addr += stride) {
 		if (last_level) {
 			__tlbi(vale1is, addr);
 			__tlbi_user(vale1is, addr);
@@ -186,14 +189,14 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	 * We cannot use leaf-only invalidation here, since we may be invalidating
 	 * table entries as part of collapsing hugepages or moving page tables.
 	 */
-	__flush_tlb_range(vma, start, end, false);
+	__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 
-	if ((end - start) > MAX_TLB_RANGE) {
+	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
 		flush_tlb_all();
 		return;
 	}