@@ -39,6 +39,13 @@
  */
 static DEFINE_SPINLOCK(npu_context_lock);
 
+/*
+ * When an address shootdown range exceeds this threshold we invalidate the
+ * entire TLB on the GPU for the given PID rather than each specific address in
+ * the range.
+ */
+#define ATSD_THRESHOLD (2*1024*1024)
+
 /*
  * Other types of TCE cache invalidation are not functional in the
  * hardware.
@@ -677,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 	unsigned long address;
 
-	for (address = start; address < end; address += PAGE_SIZE)
-		mmio_invalidate(npu_context, 1, address, false);
+	if (end - start > ATSD_THRESHOLD) {
+		/*
+		 * Just invalidate the entire PID if the address range is too
+		 * large.
+		 */
+		mmio_invalidate(npu_context, 0, 0, true);
+	} else {
+		for (address = start; address < end; address += PAGE_SIZE)
+			mmio_invalidate(npu_context, 1, address, false);
 
-	/* Do the flush only on the final addess == end */
-	mmio_invalidate(npu_context, 1, address, true);
+		/* Do the flush only on the final address == end */
+		mmio_invalidate(npu_context, 1, address, true);
+	}
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {