@@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr)
 	return (tag == (vaddr >> 22));
 }
 
+static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
+{
+	unsigned long idx;
+
+	for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
+		struct tsb *ent = &swapper_tsb[idx];
+		unsigned long match = idx << 13;
+
+		match |= (ent->tag << 22);
+		if (match >= start && match < end)
+			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+	}
+}
+
 /* TSB flushes need only occur on the processor initiating the address
  * space modification, not on each cpu the address space has run on.
  * Only the TLB flush needs that treatment.
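The new scan routine recovers each entry's page-aligned virtual address
from the direct-mapped kernel TSB layout: the entry index supplies bits
13..21 (the 8K base-page index bits, PAGE_SHIFT == 13 on sparc64), and
the stored tag, which tag_compare() above shows holds vaddr >> 22,
supplies bits 22 and up. Below is a minimal standalone sketch of that
round trip, assuming 8K pages and an illustrative 512-entry table (the
real KERNEL_TSB_NENTRIES depends on the kernel configuration) and a
made-up kernel virtual address:

	#include <stdio.h>

	#define NENTRIES 512UL	/* illustrative size, not the kernel's value */

	int main(void)
	{
		/* hypothetical page-aligned kernel virtual address */
		unsigned long vaddr = 0xfffff80123456000UL;
		/* direct-mapped index: low page-index bits, as tsb_hash() yields */
		unsigned long idx = (vaddr >> 13) & (NENTRIES - 1);
		/* tag as stored in the TSB entry, per tag_compare() */
		unsigned long tag = vaddr >> 22;
		/* the reconstruction flush_tsb_kernel_range_scan() performs */
		unsigned long match = (idx << 13) | (tag << 22);

		printf("vaddr %#lx -> idx %lu, tag %#lx, match %#lx\n",
		       vaddr, idx, tag, match);	/* match == vaddr here */
		return 0;
	}

With 512 entries the index covers exactly bits 13..21, so the OR of the
two fields reconstructs the address with no overlap.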
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long v;
 
+	if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
+		return flush_tsb_kernel_range_scan(start, end);
+
 	for (v = start; v < end; v += PAGE_SIZE) {
 		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
 					      KERNEL_TSB_NENTRIES);
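The added guard diverts to the linear scan once the range covers at
least twice as many base pages as the TSB has entries: past that point
a single pass over all KERNEL_TSB_NENTRIES slots is cheaper than
issuing one hash probe per page in the loop below. As worked arithmetic
under the same illustrative sizes, with PAGE_SHIFT == 13 and a
512-entry kernel TSB, any flush spanning 1024 or more 8K pages (8 MB
and up) takes the scan path.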