
Merge remote-tracking branch 'airlied/drm-next' into drm-misc-next-fixes

Backmerge drm-next with rc7
Sean Paul · 8 years ago
commit c048c984de
100 changed files with 17028 additions and 2067 deletions
  1. +4 -3      Documentation/devicetree/bindings/clock/sunxi-ccu.txt
  2. +3 -3      Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
  3. +1 -1      Documentation/devicetree/bindings/mfd/stm32-timers.txt
  4. +1 -1      Documentation/devicetree/bindings/net/dsa/b53.txt
  5. +1 -0      Documentation/devicetree/bindings/net/smsc911x.txt
  6. +2 -2      Makefile
  7. +3 -2      arch/arm64/kernel/vdso.c
  8. +0 -1      arch/arm64/kernel/vdso/gettimeofday.S
  9. +5 -1      arch/mips/kvm/tlb.c
  10. +1 -0     arch/powerpc/include/asm/kprobes.h
  11. +7 -4     arch/powerpc/kernel/exceptions-64s.S
  12. +17 -0    arch/powerpc/kernel/kprobes.c
  13. +28 -3    arch/powerpc/kernel/setup_64.c
  14. +46 -13   arch/powerpc/kernel/trace/ftrace_64_mprofile.S
  15. +51 -0    arch/powerpc/kvm/book3s_hv.c
  16. +11 -1    arch/powerpc/kvm/book3s_hv_interrupts.S
  17. +56 -19   arch/powerpc/kvm/book3s_hv_rmhandlers.S
  18. +2 -1     arch/powerpc/perf/perf_regs.c
  19. +65 -29   arch/powerpc/platforms/powernv/npu-dma.c
  20. +6 -9     arch/s390/kvm/gaccess.c
  21. +2 -2     arch/x86/events/intel/core.c
  22. +1 -0     arch/x86/include/asm/kvm_emulate.h
  23. +1 -2     arch/x86/include/asm/mshyperv.h
  24. +1 -0     arch/x86/kvm/emulate.c
  25. +32 -30   arch/x86/kvm/x86.c
  26. +46 -12   block/blk-mq-sched.c
  27. +0 -9     block/blk-mq-sched.h
  28. +13 -3    block/blk-mq.c
  29. +37 -30   drivers/acpi/scan.c
  30. +12 -14   drivers/block/xen-blkback/blkback.c
  31. +6 -20    drivers/block/xen-blkback/common.h
  32. +8 -7     drivers/block/xen-blkback/xenbus.c
  33. +0 -1     drivers/bus/Kconfig
  34. +6 -6     drivers/char/random.c
  35. +1 -0     drivers/clk/meson/Kconfig
  36. +1 -0     drivers/clk/sunxi-ng/Kconfig
  37. +3 -1     drivers/clk/sunxi-ng/ccu-sun50i-a64.h
  38. +1 -1     drivers/clk/sunxi-ng/ccu-sun5i.c
  39. +1 -1     drivers/clk/sunxi-ng/ccu-sun6i-a31.c
  40. +3 -1     drivers/clk/sunxi-ng/ccu-sun8i-h3.h
  41. +1 -1     drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
  42. +2 -2     drivers/clocksource/arm_arch_timer.c
  43. +1 -0     drivers/clocksource/cadence_ttc_timer.c
  44. +1 -0     drivers/clocksource/timer-sun5i.c
  45. +3 -3     drivers/gpio/gpio-mvebu.c
  46. +4 -0     drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
  47. +1 -0     drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
  48. +2 -2     drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
  49. +2 -9     drivers/gpu/drm/arm/hdlcd_crtc.c
  50. +9 -0     drivers/gpu/drm/arm/hdlcd_drv.c
  51. +20 -18   drivers/gpu/drm/drm_connector.c
  52. +10 -1    drivers/gpu/drm/i915/Makefile
  53. +4 -6     drivers/gpu/drm/i915/dvo_ch7xxx.c
  54. +1 -1     drivers/gpu/drm/i915/gvt/Makefile
  55. +3 -53    drivers/gpu/drm/i915/gvt/cmd_parser.c
  56. +23 -33   drivers/gpu/drm/i915/gvt/execlist.c
  57. +2 -7     drivers/gpu/drm/i915/gvt/firmware.c
  58. +9 -6     drivers/gpu/drm/i915/gvt/gtt.c
  59. +4 -2     drivers/gpu/drm/i915/gvt/gvt.c
  60. +97 -3    drivers/gpu/drm/i915/gvt/gvt.h
  61. +177 -126 drivers/gpu/drm/i915/gvt/handlers.c
  62. +10 -10   drivers/gpu/drm/i915/gvt/interrupt.c
  63. +25 -96   drivers/gpu/drm/i915/gvt/mmio.c
  64. +16 -28   drivers/gpu/drm/i915/gvt/mmio.h
  65. +1 -2     drivers/gpu/drm/i915/gvt/mpt.h
  66. +40 -8    drivers/gpu/drm/i915/gvt/render.c
  67. +2 -2     drivers/gpu/drm/i915/gvt/render.h
  68. +22 -5    drivers/gpu/drm/i915/gvt/sched_policy.c
  69. +32 -7    drivers/gpu/drm/i915/gvt/scheduler.c
  70. +4 -0     drivers/gpu/drm/i915/gvt/scheduler.h
  71. +129 -49  drivers/gpu/drm/i915/gvt/trace.h
  72. +7 -2     drivers/gpu/drm/i915/gvt/vgpu.c
  73. +41 -27   drivers/gpu/drm/i915/i915_debugfs.c
  74. +41 -5    drivers/gpu/drm/i915/i915_drv.c
  75. +76 -30   drivers/gpu/drm/i915/i915_drv.h
  76. +102 -62  drivers/gpu/drm/i915/i915_gem.c
  77. +17 -2    drivers/gpu/drm/i915/i915_gem_batch_pool.c
  78. +8 -9     drivers/gpu/drm/i915/i915_gem_clflush.c
  79. +82 -4    drivers/gpu/drm/i915/i915_gem_context.c
  80. +26 -0    drivers/gpu/drm/i915/i915_gem_context.h
  81. +65 -54   drivers/gpu/drm/i915/i915_gem_evict.c
  82. +1631 -1123 drivers/gpu/drm/i915/i915_gem_execbuffer.c
  83. +13 -3    drivers/gpu/drm/i915/i915_gem_gtt.c
  84. +2 -0     drivers/gpu/drm/i915/i915_gem_gtt.h
  85. +3 -1     drivers/gpu/drm/i915/i915_gem_internal.c
  86. +18 -4    drivers/gpu/drm/i915/i915_gem_object.h
  87. +7 -13    drivers/gpu/drm/i915/i915_gem_request.c
  88. +21 -7    drivers/gpu/drm/i915/i915_gem_shrinker.c
  89. +1 -0     drivers/gpu/drm/i915/i915_gem_stolen.c
  90. +19 -5    drivers/gpu/drm/i915/i915_gem_userptr.c
  91. +1 -1     drivers/gpu/drm/i915/i915_guc_submission.c
  92. +4 -2     drivers/gpu/drm/i915/i915_irq.c
  93. +5376 -0  drivers/gpu/drm/i915/i915_oa_bdw.c
  94. +40 -0    drivers/gpu/drm/i915/i915_oa_bdw.h
  95. +2690 -0  drivers/gpu/drm/i915/i915_oa_bxt.c
  96. +40 -0    drivers/gpu/drm/i915/i915_oa_bxt.h
  97. +2873 -0  drivers/gpu/drm/i915/i915_oa_chv.c
  98. +40 -0    drivers/gpu/drm/i915/i915_oa_chv.h
  99. +2602 -0  drivers/gpu/drm/i915/i915_oa_glk.c
  100. +40 -0   drivers/gpu/drm/i915/i915_oa_glk.h

+ 4 - 3
Documentation/devicetree/bindings/clock/sunxi-ccu.txt

@@ -22,7 +22,8 @@ Required properties :
 - #clock-cells : must contain 1
 - #reset-cells : must contain 1
 
-For the PRCM CCUs on H3/A64, one more clock is needed:
+For the PRCM CCUs on H3/A64, two more clocks are needed:
+- "pll-periph": the SoC's peripheral PLL from the main CCU
 - "iosc": the SoC's internal frequency oscillator
 
 Example for generic CCU:
@@ -39,8 +40,8 @@ Example for PRCM CCU:
 r_ccu: clock@01f01400 {
 	compatible = "allwinner,sun50i-a64-r-ccu";
 	reg = <0x01f01400 0x100>;
-	clocks = <&osc24M>, <&osc32k>, <&iosc>;
-	clock-names = "hosc", "losc", "iosc";
+	clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
+	clock-names = "hosc", "losc", "iosc", "pll-periph";
 	#clock-cells = <1>;
 	#reset-cells = <1>;
 };

+ 3 - 3
Documentation/devicetree/bindings/gpio/gpio-mvebu.txt

@@ -41,9 +41,9 @@ Required properties:
 Optional properties:
 
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.
 
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"
 
 - reg: an additional register set is needed, for the GPIO Blink
   Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
 		};
 
 		gpio1: gpio@18140 {
-			compatible = "marvell,armada-370-xp-gpio";
+			compatible = "marvell,armada-370-gpio";
 			reg = <0x18140 0x40>, <0x181c8 0x08>;
 			reg-names = "gpio", "pwm";
 			ngpios = <17>;

+ 1 - 1
Documentation/devicetree/bindings/mfd/stm32-timers.txt

@@ -31,7 +31,7 @@ Example:
 		compatible = "st,stm32-timers";
 		compatible = "st,stm32-timers";
 		reg = <0x40010000 0x400>;
 		reg = <0x40010000 0x400>;
 		clocks = <&rcc 0 160>;
 		clocks = <&rcc 0 160>;
-		clock-names = "clk_int";
+		clock-names = "int";
 
 
 		pwm {
 		pwm {
 			compatible = "st,stm32-pwm";
 			compatible = "st,stm32-pwm";

+ 1 - 1
Documentation/devicetree/bindings/net/dsa/b53.txt

@@ -34,7 +34,7 @@ Required properties:
       "brcm,bcm6328-switch"
       "brcm,bcm6328-switch"
       "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
       "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
 
 
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
 required and optional properties.
 
 
 Examples:
 Examples:

+ 1 - 0
Documentation/devicetree/bindings/net/smsc911x.txt

@@ -27,6 +27,7 @@ Optional properties:
   of the device. On many systems this is wired high so the device goes
   out of reset at power-on, but if it is under program control, this
   optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
 
 Examples:
 

+ 2 - 2
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -1437,7 +1437,7 @@ help:
 	@echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
 	@echo  '  make V=2   [targets] 2 => give reason for rebuild of target'
 	@echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
-	@echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse by default)'
+	@echo  '  make C=1   [targets] Check re-compiled c source with $$CHECK (sparse by default)'
 	@echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
 	@echo  '  make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
 	@echo  '  make W=n   [targets] Enable extra gcc checks, n=1,2,3 where'

+ 3 - 2
arch/arm64/kernel/vdso.c

@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
 		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
 		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
 		vdso_data->raw_time_sec		= tk->raw_time.tv_sec;
-		vdso_data->raw_time_nsec	= tk->raw_time.tv_nsec;
+		vdso_data->raw_time_nsec	= (tk->raw_time.tv_nsec <<
+						   tk->tkr_raw.shift) +
+						  tk->tkr_raw.xtime_nsec;
 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
 		vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
-		/* tkr_raw.xtime_nsec == 0 */
 		vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
 		vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
 		/* tkr_mono.shift == tkr_raw.shift */

+ 0 - 1
arch/arm64/kernel/vdso/gettimeofday.S

@@ -256,7 +256,6 @@ monotonic_raw:
 	seqcnt_check fail=monotonic_raw
 
 	/* All computations are done with left-shifted nsecs. */
-	lsl	x14, x14, x12
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12
 

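Note: the two hunks above change the vDSO to publish CLOCK_MONOTONIC_RAW nanoseconds already left-shifted by tkr_raw.shift, so the assembly no longer needs its own lsl. A minimal standalone C sketch of that fixed-point layout (illustrative values only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t shift = 4;             /* stand-in for tk->tkr_raw.shift */
        uint64_t tv_nsec = 123456;      /* stand-in for tk->raw_time.tv_nsec */
        uint64_t xtime_nsec = 77;       /* tk->tkr_raw.xtime_nsec, in shifted units */

        /* what update_vsyscall() now stores in the vDSO data page */
        uint64_t raw_time_nsec = (tv_nsec << shift) + xtime_nsec;

        /* the reader adds the cycle delta in shifted units, then shifts once */
        printf("nsec = %llu\n", (unsigned long long)(raw_time_nsec >> shift));
        return 0;
    }
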
+ 5 - 1
arch/mips/kvm/tlb.c

@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
 			  bool user, bool kernel)
 {
-	int idx_user, idx_kernel;
+	/*
+	 * Initialize idx_user and idx_kernel to workaround bogus
+	 * maybe-initialized warning when using GCC 6.
+	 */
+	int idx_user = 0, idx_kernel = 0;
 	unsigned long flags, old_entryhi;
 
 	local_irq_save(flags);

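Note: a hypothetical standalone C sketch (not from this commit) of the control-flow shape that can trip GCC 6's maybe-uninitialized analysis — both the write and the read are guarded by the same condition, which the compiler cannot always prove — and the initialize-to-zero workaround the hunk above applies:

    #include <stdio.h>

    static int lookup(int want, int *out)
    {
        int idx = 0;        /* the workaround: initialized despite guarded use */

        if (want)
            idx = 42;       /* write guarded by the condition */

        if (want)
            *out = idx;     /* read guarded by the same condition */

        return want;
    }

    int main(void)
    {
        int v = 0;

        if (lookup(1, &v))
            printf("idx = %d\n", v);    /* prints 42 */
        return 0;
    }
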
+ 1 - 0
arch/powerpc/include/asm/kprobes.h

@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 			   struct kprobe_ctlblk *kcb);

+ 7 - 4
arch/powerpc/kernel/exceptions-64s.S

@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
 	.balign	IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-	andis.	r0,r4,0xa410		/* weird error? */
+	andis.	r0,r4,0xa450		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
-	andis.  r0,r4,DSISR_DABRMATCH@h
-	bne-    handle_dabr_fault
 	CURRENT_THREAD_INFO(r11, r1)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
 
 	/* Error */
 	blt-	13f
+
+	/* Reload DSISR into r4 for the DABR check below */
+	ld      r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:	ld	r4,_DAR(r1)
+11:	andis.  r0,r4,DSISR_DABRMATCH@h
+	bne-    handle_dabr_fault
+	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_page_fault

+ 17 - 0
arch/powerpc/kernel/kprobes.c

@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+	struct kprobe *p = kprobe_running();
+	return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 	return  (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
 	return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
 	preempt_enable_no_resched();
 	return 1;
 }

+ 28 - 3
arch/powerpc/kernel/setup_64.c

@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
 }
 #endif
 
+/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+	ti->task = NULL;
+	ti->cpu = cpu;
+	ti->preempt_count = 0;
+	ti->local_flags = 0;
+	ti->flags = 0;
+	klp_init_thread_info(ti);
+}
+
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
 	 * Since we use these as temporary stacks during secondary CPU
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
+	 *
+	 * The IRQ stacks allocated elsewhere in this file are zeroed and
+	 * initialized in kernel/irq.c. These are initialized here in order
+	 * to have emergency stacks available as early as possible.
 	 */
 	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		struct thread_info *ti;
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for NMI exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
 		/* emergency stack for machine check exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}

+ 46 - 13
arch/powerpc/kernel/trace/ftrace_64_mprofile.S

@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
 	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 
 	/* Save all gprs to pt_regs */
-	SAVE_8GPRS(0,r1)
-	SAVE_8GPRS(8,r1)
-	SAVE_8GPRS(16,r1)
-	SAVE_8GPRS(24,r1)
+	SAVE_GPR(0, r1)
+	SAVE_10GPRS(2, r1)
+	SAVE_10GPRS(12, r1)
+	SAVE_10GPRS(22, r1)
+
+	/* Save previous stack pointer (r1) */
+	addi	r8, r1, SWITCH_FRAME_SIZE
+	std	r8, GPR1(r1)
 
 	/* Load special regs for save below */
 	mfmsr   r8
@@ -95,18 +99,44 @@ ftrace_call:
 	bl	ftrace_stub
 	nop
 
-	/* Load ctr with the possibly modified NIP */
-	ld	r3, _NIP(r1)
-	mtctr	r3
+	/* Load the possibly modified NIP */
+	ld	r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-	cmpd	r14,r3		/* has NIP been altered? */
+	cmpd	r14, r15	/* has NIP been altered? */
+#endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+	/* NIP has not been altered, skip over further checks */
+	beq	1f
+
+	/* Check if there is an active kprobe on us */
+	subi	r3, r14, 4
+	bl	is_current_kprobe_addr
+	nop
+
+	/*
+	 * If r3 == 1, then this is a kprobe/jprobe.
+	 * else, this is livepatched function.
+	 *
+	 * The conditional branch for livepatch_handler below will use the
+	 * result of this comparison. For kprobe/jprobe, we just need to branch to
+	 * the new NIP, not call livepatch_handler. The branch below is bne, so we
+	 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+	 * CR0[EQ] = (r3 == 1).
+	 */
+	cmpdi	r3, 1
+1:
 #endif
 
+	/* Load CTR with the possibly modified NIP */
+	mtctr	r15
+
 	/* Restore gprs */
-	REST_8GPRS(0,r1)
-	REST_8GPRS(8,r1)
-	REST_8GPRS(16,r1)
-	REST_8GPRS(24,r1)
+	REST_GPR(0,r1)
+	REST_10GPRS(2,r1)
+	REST_10GPRS(12,r1)
+	REST_10GPRS(22,r1)
 
 	/* Restore possibly modified LR */
 	ld	r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
 	addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-        /* Based on the cmpd above, if the NIP was altered handle livepatch */
+        /*
+	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+	 * not on a kprobe/jprobe, then handle livepatch.
+	 */
 	bne-	livepatch_handler
 #endif
 

+ 51 - 0
arch/powerpc/kvm/book3s_hv.c

@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
 		break;
 	case KVM_REG_PPC_TB_OFFSET:
 	case KVM_REG_PPC_TB_OFFSET:
+		/*
+		 * POWER9 DD1 has an erratum where writing TBU40 causes
+		 * the timebase to lose ticks.  So we don't let the
+		 * timebase offset be changed on P9 DD1.  (It is
+		 * initialized to zero.)
+		 */
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			break;
 		/* round up to multiple of 2^24 */
 		/* round up to multiple of 2^24 */
 		vcpu->arch.vcore->tb_offset =
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 {
 	int r;
 	int r;
 	int srcu_idx;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned int user_vrsave;
 
 
 	if (!vcpu->arch.sane) {
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 	kvmppc_core_prepare_to_enter(vcpu);
 
 
 	/* No need to go into the guest when all we'll do is come back out */
 	/* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 
 	flush_all_to_thread(current);
 	flush_all_to_thread(current);
 
 
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 		}
 	} while (is_kvmppc_resume_guest(r));
 	} while (is_kvmppc_resume_guest(r));
 
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, current->thread.fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
  out:
  out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);

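Note: a hypothetical userspace sketch of the save/restore bracket added above — user-visible register state (EBB regs, TAR, VRSAVE) is captured before the guest-run loop and written back after it, so guest values never leak into userspace. The struct and function names below are illustrative stand-ins, not kernel APIs:

    #include <stdio.h>

    struct user_sprs { unsigned long ebbhr, ebbrr, bescr, tar, vrsave; };

    static struct user_sprs hw = { .tar = 0x1000 };  /* stand-in for real SPRs */

    static void run_guest(void)
    {
        hw.tar = 0xdead;        /* the guest clobbers the register */
    }

    int main(void)
    {
        struct user_sprs saved = hw;    /* save userspace values */

        run_guest();                    /* guest may change them */

        hw = saved;                     /* restore on the way back out */
        printf("tar restored: %#lx\n", hw.tar);     /* 0x1000 */
        return 0;
    }
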
+ 11 - 1
arch/powerpc/kvm/book3s_hv_interrupts.S

@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	 * Put whatever is in the decrementer into the
 	 * hypervisor decrementer.
 	 */
+BEGIN_FTR_SECTION
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	ld	r6, VCORE_KVM(r5)
+	ld	r9, KVM_HOST_LPCR(r6)
+	andis.	r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mfspr	r8,SPRN_DEC
 	mftb	r7
-	mtspr	SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+	/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+	bne	32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	extsw	r8,r8
+32:	mtspr	SPRN_HDEC,r8
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)
 

+ 56 - 19
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 #include <asm/xive-regs.h>
 
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)			\
+BEGIN_FTR_SECTION;				\
+	extsw	reg, reg;			\
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 
 /* Values in HSTATE_NAPPING(r13) */
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
 #define NAPPING_NOVCPU	2
 
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS			144
+#define STACK_SLOT_TRAP		(SFS-4)
+#define STACK_SLOT_TID		(SFS-16)
+#define STACK_SLOT_PSSCR	(SFS-24)
+#define STACK_SLOT_PID		(SFS-32)
+#define STACK_SLOT_IAMR		(SFS-40)
+#define STACK_SLOT_CIABR	(SFS-48)
+#define STACK_SLOT_DAWR		(SFS-56)
+#define STACK_SLOT_DAWRX	(SFS-64)
+
 /*
 /*
  * Call kvmppc_hv_entry in real mode.
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
 kvmppc_primary_no_guest:
 	/* We handle this much like a ceded vcpu */
 	/* We handle this much like a ceded vcpu */
 	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
 	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+	/* HDEC value came from DEC in the first place, it will fit */
 	mfspr	r3, SPRN_HDEC
 	mfspr	r3, SPRN_HDEC
 	mtspr	SPRN_DEC, r3
 	mtspr	SPRN_DEC, r3
 	/*
 	/*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
 
 	/* See if our timeslice has expired (HDEC is negative) */
 	/* See if our timeslice has expired (HDEC is negative) */
 	mfspr	r0, SPRN_HDEC
 	mfspr	r0, SPRN_HDEC
+	EXTEND_HDEC(r0)
 	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
 	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-	cmpwi	r0, 0
+	cmpdi	r0, 0
 	blt	kvm_novcpu_exit
 	blt	kvm_novcpu_exit
 
 
 	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
 	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
 	bl	kvmhv_accumulate_time
 	bl	kvmhv_accumulate_time
 #endif
 #endif
 13:	mr	r3, r12
 13:	mr	r3, r12
-	stw	r12, 112-4(r1)
+	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	bl	kvmhv_commence_exit
 	nop
 	nop
-	lwz	r12, 112-4(r1)
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 	b	kvmhv_switch_to_host
 
 
 /*
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
 	lbz	r4, HSTATE_PTID(r13)
 	lbz	r4, HSTATE_PTID(r13)
 	cmpwi	r4, 0
 	cmpwi	r4, 0
 	bne	63f
 	bne	63f
-	lis	r6, 0x7fff
-	ori	r6, r6, 0xffff
+	LOAD_REG_ADDR(r6, decrementer_max)
+	ld	r6, 0(r6)
 	mtspr	SPRN_HDEC, r6
 	mtspr	SPRN_HDEC, r6
 	/* and set per-LPAR registers, if doing dynamic micro-threading */
 	/* and set per-LPAR registers, if doing dynamic micro-threading */
 	ld	r6, HSTATE_SPLIT_MODE(r13)
 	ld	r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  *                                                                            *
  *                                                                            *
  *****************************************************************************/
  *****************************************************************************/
 
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID		(112-16)
-#define STACK_SLOT_PSSCR	(112-24)
-#define STACK_SLOT_PID		(112-32)
-
 .global kvmppc_hv_entry
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 kvmppc_hv_entry:
 
 
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
 	 */
 	 */
 	mflr	r0
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -112(r1)
+	stdu	r1, -SFS(r1)
 
 
 	/* Save R1 in the PACA */
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
 	std	r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_TIDR
 	mfspr	r5, SPRN_TIDR
 	mfspr	r6, SPRN_PSSCR
 	mfspr	r6, SPRN_PSSCR
 	mfspr	r7, SPRN_PID
 	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_TID(r1)
 	std	r5, STACK_SLOT_TID(r1)
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r7, STACK_SLOT_PID(r1)
 	std	r7, STACK_SLOT_PID(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_CIABR
+	mfspr	r6, SPRN_DAWR
+	mfspr	r7, SPRN_DAWRX
+	std	r5, STACK_SLOT_CIABR(r1)
+	std	r6, STACK_SLOT_DAWR(r1)
+	std	r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 
 BEGIN_FTR_SECTION
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
 	/* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
 
 	/* Check if HDEC expires soon */
 	/* Check if HDEC expires soon */
 	mfspr	r3, SPRN_HDEC
 	mfspr	r3, SPRN_HDEC
-	cmpwi	r3, 512		/* 1 microsecond */
+	EXTEND_HDEC(r3)
+	cmpdi	r3, 512		/* 1 microsecond */
 	blt	hdec_soon
 	blt	hdec_soon
 
 
 #ifdef CONFIG_KVM_XICS
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	 * set by the guest could disrupt the host.
 	 * set by the guest could disrupt the host.
 	 */
 	 */
 	li	r0, 0
 	li	r0, 0
-	mtspr	SPRN_IAMR, r0
-	mtspr	SPRN_CIABR, r0
-	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_PSPB, r0
 	mtspr	SPRN_WORT, r0
 	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
 BEGIN_FTR_SECTION
+	mtspr	SPRN_IAMR, r0
 	mtspr	SPRN_TCSCR, r0
 	mtspr	SPRN_TCSCR, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	li	r0, 1
 	li	r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	std	r6,VCPU_UAMOR(r9)
 	std	r6,VCPU_UAMOR(r9)
 	li	r6,0
 	li	r6,0
 	mtspr	SPRN_AMR,r6
 	mtspr	SPRN_AMR,r6
+	mtspr	SPRN_UAMOR, r6
 
 
 	/* Switch DSCR back to host value */
 	/* Switch DSCR back to host value */
 	mfspr	r8, SPRN_DSCR
 	mfspr	r8, SPRN_DSCR
@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	ptesync
 	ptesync
 
 
 	/* Restore host values of some registers */
 	/* Restore host values of some registers */
+BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_CIABR(r1)
+	ld	r6, STACK_SLOT_DAWR(r1)
+	ld	r7, STACK_SLOT_DAWRX(r1)
+	mtspr	SPRN_CIABR, r5
+	mtspr	SPRN_DAWR, r6
+	mtspr	SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
 BEGIN_FTR_SECTION
 	ld	r5, STACK_SLOT_TID(r1)
 	ld	r5, STACK_SLOT_TID(r1)
 	ld	r6, STACK_SLOT_PSSCR(r1)
 	ld	r6, STACK_SLOT_PSSCR(r1)
 	ld	r7, STACK_SLOT_PID(r1)
 	ld	r7, STACK_SLOT_PID(r1)
+	ld	r8, STACK_SLOT_IAMR(r1)
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_PSSCR, r6
 	mtspr	SPRN_PSSCR, r6
 	mtspr	SPRN_PID, r7
 	mtspr	SPRN_PID, r7
+	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 BEGIN_FTR_SECTION
 	PPC_INVALIDATE_ERAT
 	PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 	stb	r0, HSTATE_IN_GUEST(r13)
 
 
-	ld	r0, 112+PPC_LR_STKOFF(r1)
-	addi	r1, r1, 112
+	ld	r0, SFS+PPC_LR_STKOFF(r1)
+	addi	r1, r1, SFS
 	mtlr	r0
 	mtlr	r0
 	blr
 	blr
 
 
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 	mfspr	r3, SPRN_DEC
 	mfspr	r3, SPRN_DEC
 	mfspr	r4, SPRN_HDEC
 	mfspr	r4, SPRN_HDEC
 	mftb	r5
 	mftb	r5
-	cmpw	r3, r4
+	extsw	r3, r3
+	EXTEND_HDEC(r4)
+	cmpd	r3, r4
 	ble	67f
 	ble	67f
 	mtspr	SPRN_DEC, r4
 	mtspr	SPRN_DEC, r4
 67:
 67:
 	/* save expiry time of guest decrementer */
 	/* save expiry time of guest decrementer */
-	extsw	r3, r3
 	add	r3, r3, r5
 	add	r3, r3, r5
 	ld	r4, HSTATE_KVM_VCPU(r13)
 	ld	r4, HSTATE_KVM_VCPU(r13)
 	ld	r5, HSTATE_KVM_VCORE(r13)
 	ld	r5, HSTATE_KVM_VCORE(r13)

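Note: a minimal C sketch (illustrative, not kernel code) of what the EXTEND_HDEC macro above guards against — before POWER9, HDEC is a signed 32-bit count that must be sign-extended before a 64-bit compare, while on POWER9 (arch v3.00) it is already wide, so extending would corrupt large values:

    #include <stdint.h>
    #include <stdio.h>

    static int64_t read_hdec(uint64_t raw, int is_power9)
    {
        if (is_power9)
            return (int64_t)raw;        /* already a wide value */
        return (int64_t)(int32_t)raw;   /* EXTEND_HDEC: sign-extend 32->64 */
    }

    int main(void)
    {
        uint64_t raw = 0xfffffff0;      /* -16 as a 32-bit decrementer */

        printf("pre-P9: %lld\n", (long long)read_hdec(raw, 0)); /* -16 */
        printf("P9:     %lld\n", (long long)read_hdec(raw, 1)); /* 4294967280 */
        return 0;
    }
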
+ 2 - 1
arch/powerpc/perf/perf_regs.c

@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 			struct pt_regs *regs_user_copy)
 {
 	regs_user->regs = task_pt_regs(current);
-	regs_user->abi  = perf_reg_abi(current);
+	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+			 PERF_SAMPLE_REGS_ABI_NONE;
 }

+ 65 - 29
arch/powerpc/platforms/powernv/npu-dma.c

@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
 	return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	/* Invalidating the entire process doesn't use a va */
 	return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-			unsigned long pid)
+			unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+	struct npu *npu;
+	int reg;
+};
+
+static void mmio_invalidate_wait(
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+	struct npu *npu;
+	int i, reg;
+
+	/* Wait for all invalidations to complete */
+	for (i = 0; i <= max_npu2_index; i++) {
+		if (mmio_atsd_reg[i].reg < 0)
+			continue;
+
+		/* Wait for completion */
+		npu = mmio_atsd_reg[i].npu;
+		reg = mmio_atsd_reg[i].reg;
+		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+			cpu_relax();
+
+		put_mmio_atsd_reg(npu, reg);
+
+		/*
+		 * The GPU requires two flush ATSDs to ensure all entries have
+		 * been flushed. We use PID 0 as it will never be used for a
+		 * process on the GPU.
+		 */
+		if (flush)
+			mmio_invalidate_pid(npu, 0, true);
+	}
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-			unsigned long address)
+			unsigned long address, bool flush)
 {
-	int i, j, reg;
+	int i, j;
 	struct npu *npu;
 	struct pnv_phb *nphb;
 	struct pci_dev *npdev;
-	struct {
-		struct npu *npu;
-		int reg;
-	} mmio_atsd_reg[NV_MAX_NPUS];
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
 	unsigned long pid = npu_context->mm->context.id;
 
 	/*
@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 
 			if (va)
 				mmio_atsd_reg[i].reg =
-					mmio_invalidate_va(npu, address, pid);
+					mmio_invalidate_va(npu, address, pid,
+							flush);
 			else
 				mmio_atsd_reg[i].reg =
-					mmio_invalidate_pid(npu, pid);
+					mmio_invalidate_pid(npu, pid, flush);
 
 			/*
 			 * The NPU hardware forwards the shootdown to all GPUs
@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 	 */
 	flush_tlb_mm(npu_context->mm);
 
-	/* Wait for all invalidations to complete */
-	for (i = 0; i <= max_npu2_index; i++) {
-		if (mmio_atsd_reg[i].reg < 0)
-			continue;
-
-		/* Wait for completion */
-		npu = mmio_atsd_reg[i].npu;
-		reg = mmio_atsd_reg[i].reg;
-		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-			cpu_relax();
-		put_mmio_atsd_reg(npu, reg);
-	}
+	mmio_invalidate_wait(mmio_atsd_reg, flush);
+	if (flush)
+		/* Wait for the flush to complete */
+		mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
 	 * There should be no more translation requests for this PID, but we
 	 * need to ensure any entries for it are removed from the TLB.
 	 */
-	mmio_invalidate(npu_context, 0, 0);
+	mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 	unsigned long address;
 
-	for (address = start; address <= end; address += PAGE_SIZE)
-		mmio_invalidate(npu_context, 1, address);
+	for (address = start; address < end; address += PAGE_SIZE)
+		mmio_invalidate(npu_context, 1, address, false);
+
+	/* Do the flush only on the final addess == end */
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		/* No nvlink associated with this GPU device */
 		return ERR_PTR(-ENODEV);
 
-	if (!mm) {
-		/* kernel thread contexts are not supported */
+	if (!mm || mm->context.id == 0) {
+		/*
+		 * Kernel thread contexts are not supported and context id 0 is
+		 * reserved on the GPU.
+		 */
 		return ERR_PTR(-EINVAL);
 	}
 
+ 6 - 9
arch/s390/kvm/gaccess.c

@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 	ptr = asce.origin * 4096;
 	if (asce.r) {
 		*fake = 1;
+		ptr = 0;
 		asce.dt = ASCE_TYPE_REGION1;
 	}
 	switch (asce.dt) {
 	case ASCE_TYPE_REGION1:
-		if (vaddr.rfx01 > asce.tl && !asce.r)
+		if (vaddr.rfx01 > asce.tl && !*fake)
 			return PGM_REGION_FIRST_TRANS;
 		break;
 	case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union region1_table_entry rfte;
 
 		if (*fake) {
-			/* offset in 16EB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+			ptr += (unsigned long) vaddr.rfx << 53;
 			rfte.val = ptr;
 			goto shadow_r2t;
 		}
@@ -1036,8 +1036,7 @@ shadow_r2t:
 		union region2_table_entry rste;
 
 		if (*fake) {
-			/* offset in 8PB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+			ptr += (unsigned long) vaddr.rsx << 42;
 			rste.val = ptr;
 			goto shadow_r3t;
 		}
@@ -1064,8 +1063,7 @@ shadow_r3t:
 		union region3_table_entry rtte;
 
 		if (*fake) {
-			/* offset in 4TB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+			ptr += (unsigned long) vaddr.rtx << 31;
 			rtte.val = ptr;
 			goto shadow_sgt;
 		}
@@ -1101,8 +1099,7 @@ shadow_sgt:
 		union segment_table_entry ste;
 
 		if (*fake) {
-			/* offset in 2G guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+			ptr += (unsigned long) vaddr.sx << 20;
 			ste.val = ptr;
 			goto shadow_pgt;
 		}

+ 2 - 2
arch/x86/events/intel/core.c

@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
  [ C(DTLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x608,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x649,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
 	},
 	[ C(OP_PREFETCH) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0,

+ 1 - 0
arch/x86/include/asm/kvm_emulate.h

@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
 
 	bool perm_ok; /* do not check permissions if true */
 	bool ud;	/* inject an #UD if host doesn't support insn */
+	bool tf;	/* TF value before instruction (after for syscall/sysret) */
 
 	bool have_exception;
 	struct x86_exception exception;

+ 1 - 2
arch/x86/include/asm/mshyperv.h

@@ -2,8 +2,7 @@
 #define _ASM_X86_MSHYPER_H
 
 #include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
+#include <linux/atomic.h>
 #include <asm/hyperv.h>
 
 /*

+ 1 - 0
arch/x86/kvm/emulate.c

@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
 	}
 	}
 
 
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
 	return X86EMUL_CONTINUE;
 	return X86EMUL_CONTINUE;
 }
 }
 
 

+ 32 - 30
arch/x86/kvm/x86.c

@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
 
 	ctxt->eflags = kvm_get_rflags(vcpu);
 	ctxt->eflags = kvm_get_rflags(vcpu);
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
 	ctxt->eip = kvm_rip_read(vcpu);
 	ctxt->eip = kvm_rip_read(vcpu);
 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 	return dr6;
 	return dr6;
 }
 }
 
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
 {
 	struct kvm_run *kvm_run = vcpu->run;
 	struct kvm_run *kvm_run = vcpu->run;
 
 
-	/*
-	 * rflags is the old, "raw" value of the flags.  The new value has
-	 * not been saved yet.
-	 *
-	 * This is correct even for TF set by the guest, because "the
-	 * processor will not generate this exception after the instruction
-	 * that sets the TF flag".
-	 */
-	if (unlikely(rflags & X86_EFLAGS_TF)) {
-		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-						  DR6_RTM;
-			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-			kvm_run->debug.arch.exception = DB_VECTOR;
-			kvm_run->exit_reason = KVM_EXIT_DEBUG;
-			*r = EMULATE_USER_EXIT;
-		} else {
-			/*
-			 * "Certain debug exceptions may clear bit 0-3.  The
-			 * remaining contents of the DR6 register are never
-			 * cleared by the processor".
-			 */
-			vcpu->arch.dr6 &= ~15;
-			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
-		}
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+		kvm_run->debug.arch.exception = DB_VECTOR;
+		kvm_run->exit_reason = KVM_EXIT_DEBUG;
+		*r = EMULATE_USER_EXIT;
+	} else {
+		/*
+		 * "Certain debug exceptions may clear bit 0-3.  The
+		 * remaining contents of the DR6 register are never
+		 * cleared by the processor".
+		 */
+		vcpu->arch.dr6 &= ~15;
+		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+		kvm_queue_exception(vcpu, DB_VECTOR);
 	}
 	}
 }
 }
 
 
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	int r = EMULATE_DONE;
 	int r = EMULATE_DONE;
 
 
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+	/*
+	 * rflags is the old, "raw" value of the flags.  The new value has
+	 * not been saved yet.
+	 *
+	 * This is correct even for TF set by the guest, because "the
+	 * processor will not generate this exception after the instruction
+	 * that sets the TF flag".
+	 */
+	if (unlikely(rflags & X86_EFLAGS_TF))
+		kvm_vcpu_do_singlestep(vcpu, &r);
 	return r == EMULATE_DONE;
 	return r == EMULATE_DONE;
 }
 }
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ restart:
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
 		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE)
-			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+		if (r == EMULATE_DONE &&
+		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
 			__kvm_set_rflags(vcpu, ctxt->eflags);
 			__kvm_set_rflags(vcpu, ctxt->eflags);

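Note: a hypothetical sketch of the TF bookkeeping introduced above — the trap flag is sampled once before emulation, so a #DB is injected only when TF was already set when the instruction started, matching the rule that the instruction which sets TF does not itself trap. The struct and function below are illustrative, not KVM's:

    #include <stdbool.h>
    #include <stdio.h>

    #define X86_EFLAGS_TF (1u << 8)

    struct emu_ctxt {
        unsigned int eflags;
        bool tf;            /* TF value before the instruction */
    };

    static bool emulate_and_check_singlestep(struct emu_ctxt *ctxt)
    {
        ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;  /* sample up front */

        /* ... emulate; the instruction itself may set TF ... */
        ctxt->eflags |= X86_EFLAGS_TF;

        return ctxt->tf;    /* inject #DB only if TF was set beforehand */
    }

    int main(void)
    {
        struct emu_ctxt c = { .eflags = 0 };

        printf("inject #DB: %d\n", emulate_and_check_singlestep(&c)); /* 0 */
        return 0;
    }
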
+ 46 - 12
block/blk-mq-sched.c

@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
 		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_inc(&q->shared_hctx_restart);
+	} else
+		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return false;
+
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_dec(&q->shared_hctx_restart);
+	} else
+		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+	if (blk_mq_hctx_has_pending(hctx)) {
+		blk_mq_run_hw_queue(hctx, true);
+		return true;
+	}
+
+	return false;
+}
+
 struct request *blk_mq_sched_get_request(struct request_queue *q,
 					 struct bio *bio,
 					 unsigned int op,
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx)) {
-			blk_mq_run_hw_queue(hctx, true);
-			return true;
-		}
-	}
-	return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos:    loop cursor.
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
 	unsigned int i, j;
 
 	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		/*
+		 * If this is 0, then we know that no hardware queues
+		 * have RESTART marked. We're done.
+		 */
+		if (!atomic_read(&queue->shared_hctx_restart))
+			return;
+
 		rcu_read_lock();
 		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
 					   tag_set_list) {

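Note: a hypothetical userspace sketch of the bookkeeping added above — an atomic counter shadows the per-hctx RESTART bits so the restart path can bail out with a single read when nothing is marked, and test-and-set/clear semantics keep bit and counter in sync. Names are illustrative, not the block layer's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct hctx { atomic_bool restart; };

    static atomic_int shared_restart;   /* mirrors q->shared_hctx_restart */

    static void mark_restart(struct hctx *h)
    {
        /* bump the counter only when the bit actually flips 0 -> 1 */
        if (!atomic_exchange(&h->restart, true))
            atomic_fetch_add(&shared_restart, 1);
    }

    static bool clear_restart(struct hctx *h)
    {
        if (!atomic_load(&h->restart))
            return false;
        if (atomic_exchange(&h->restart, false))
            atomic_fetch_sub(&shared_restart, 1);
        return true;
    }

    int main(void)
    {
        struct hctx h = { .restart = false };

        mark_restart(&h);
        if (atomic_load(&shared_restart) == 0)
            return 0;   /* fast path: no queue marked */
        printf("cleared: %d\n", clear_restart(&h));     /* 1 */
        return 0;
    }
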
+ 0 - 9
block/blk-mq-sched.h

@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 	return false;
 }
 
-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

+ 13 - 3
block/blk-mq.c

@@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	}
 }
 
+/*
+ * Caller needs to ensure that we're either frozen/quiesced, or that
+ * the queue isn't live yet.
+ */
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		if (shared)
+		if (shared) {
+			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+				atomic_inc(&q->shared_hctx_restart);
 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
-		else
+		} else {
+			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+				atomic_dec(&q->shared_hctx_restart);
 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+		}
 	}
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+					bool shared)
 {
 	struct request_queue *q;
 

+ 37 - 30
drivers/acpi/scan.c

@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
 	adev->flags.coherent_dma = cca;
 	adev->flags.coherent_dma = cca;
 }
 }
 
 
+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+	bool *is_spi_i2c_slave_p = data;
+
+	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+		return 1;
+
+	/*
+	 * devices that are connected to UART still need to be enumerated to
+	 * platform bus
+	 */
+	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+		*is_spi_i2c_slave_p = true;
+
+	 /* no need to do more checking */
+	return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+	struct list_head resource_list;
+	bool is_spi_i2c_slave = false;
+
+	INIT_LIST_HEAD(&resource_list);
+	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+			       &is_spi_i2c_slave);
+	acpi_dev_free_resource_list(&resource_list);
+
+	return is_spi_i2c_slave;
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 			     int type, unsigned long long sta)
 			     int type, unsigned long long sta)
 {
 {
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 	acpi_bus_get_flags(device);
 	acpi_bus_get_flags(device);
 	device->flags.match_driver = false;
 	device->flags.match_driver = false;
 	device->flags.initialized = true;
 	device->flags.initialized = true;
+	device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
 	acpi_device_clear_enumerated(device);
 	acpi_device_clear_enumerated(device);
 	device_initialize(&device->dev);
 	device_initialize(&device->dev);
 	dev_set_uevent_suppress(&device->dev, true);
 	dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
 	return AE_OK;
 	return AE_OK;
 }
 }
 
 
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-	bool *is_spi_i2c_slave_p = data;
-
-	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-		return 1;
-
-	/*
-	 * devices that are connected to UART still need to be enumerated to
-	 * platform bus
-	 */
-	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-		*is_spi_i2c_slave_p = true;
-
-	 /* no need to do more checking */
-	return -1;
-}
-
 static void acpi_default_enumeration(struct acpi_device *device)
 static void acpi_default_enumeration(struct acpi_device *device)
 {
 {
-	struct list_head resource_list;
-	bool is_spi_i2c_slave = false;
-
 	/*
 	/*
 	 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
 	 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
 	 * respective parents.
 	 * respective parents.
 	 */
 	 */
-	INIT_LIST_HEAD(&resource_list);
-	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-			       &is_spi_i2c_slave);
-	acpi_dev_free_resource_list(&resource_list);
-	if (!is_spi_i2c_slave) {
+	if (!device->flags.spi_i2c_slave) {
 		acpi_create_platform_device(device, NULL);
 		acpi_create_platform_device(device, NULL);
 		acpi_device_set_enumerated(device);
 		acpi_device_set_enumerated(device);
 	} else {
 	} else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
 		return;
 		return;
 
 
 	device->flags.match_driver = true;
 	device->flags.match_driver = true;
-	if (ret > 0) {
+	if (ret > 0 && !device->flags.spi_i2c_slave) {
 		acpi_device_set_enumerated(device);
 		acpi_device_set_enumerated(device);
 		goto ok;
 		goto ok;
 	}
 	}
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
 	if (ret < 0)
 	if (ret < 0)
 		return;
 		return;
 
 
-	if (device->pnp.type.platform_id)
-		acpi_default_enumeration(device);
-	else
+	if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
 		acpi_device_set_enumerated(device);
 		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);
 
 
  ok:
  ok:
 	list_for_each_entry(child, &device->children, node)
 	list_for_each_entry(child, &device->children, node)

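The scan.c change computes the SPI/I2C-slave answer once, while the device object is built, and caches it in a flag that the enumeration paths test later. A small standalone sketch of that classify-once idea; the types and names here are illustrative, not the ACPI core's:

    #include <stdbool.h>
    #include <stdio.h>

    enum res { RES_SPI, RES_I2C, RES_UART, RES_OTHER };

    struct device {
            const enum res *resources;
            int nres;
            bool spi_i2c_slave;     /* cached like flags.spi_i2c_slave */
    };

    static void device_init(struct device *dev)
    {
            for (int i = 0; i < dev->nres; i++) {
                    if (dev->resources[i] == RES_SPI ||
                        dev->resources[i] == RES_I2C) {
                            dev->spi_i2c_slave = true;
                            break;  /* mirrors the early "return -1" above */
                    }
            }
    }

    int main(void)
    {
            enum res r[] = { RES_OTHER, RES_I2C };
            struct device dev = { r, 2, false };

            device_init(&dev);      /* walk the resources once, at init */
            puts(dev.spi_i2c_slave ? "left to the SPI/I2C parent"
                                   : "enumerated to the platform bus");
            return 0;
    }
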
+ 12 - 14
drivers/block/xen-blkback/blkback.c

@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
 	unsigned long timeout;
 	int ret;
 
-	xen_blkif_get(blkif);
-
 	set_freezable();
 	while (!kthread_should_stop()) {
 		if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
 		print_stats(ring);
 
 	ring->xenblkd = NULL;
-	xen_blkif_put(blkif);
 
 	return 0;
 }
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 static void make_response(struct xen_blkif_ring *ring, u64 id,
 			  unsigned short op, int st)
 {
-	struct blkif_response  resp;
+	struct blkif_response *resp;
 	unsigned long     flags;
 	union blkif_back_rings *blk_rings;
 	int notify;
 
-	resp.id        = id;
-	resp.operation = op;
-	resp.status    = st;
-
 	spin_lock_irqsave(&ring->blk_ring_lock, flags);
 	blk_rings = &ring->blk_rings;
 	/* Place on the response ring for the relevant domain. */
 	switch (ring->blkif->blk_protocol) {
 	case BLKIF_PROTOCOL_NATIVE:
-		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->native,
+					 blk_rings->native.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_32:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_64:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+					 blk_rings->x86_64.rsp_prod_pvt);
 		break;
 	default:
 		BUG();
 	}
+
+	resp->id        = id;
+	resp->operation = op;
+	resp->status    = st;
+
 	blk_rings->common.rsp_prod_pvt++;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);

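make_response() now takes a pointer to the next free ring slot and fills the fields in place, instead of assembling a response on the stack and memcpy()ing it in. A simplified sketch of the in-place fill; the ring structure below is a stand-in for the blkif rings, not the Xen ABI:

    #include <stdint.h>
    #include <stdio.h>

    struct response { uint64_t id; uint8_t operation; int16_t status; };

    struct ring {
            struct response slots[8];
            unsigned int rsp_prod_pvt;
    };

    static struct response *ring_get_response(struct ring *r)
    {
            return &r->slots[r->rsp_prod_pvt & 7];
    }

    static void make_response(struct ring *r, uint64_t id, uint8_t op, int16_t st)
    {
            struct response *resp = ring_get_response(r);   /* no stack copy */

            resp->id        = id;
            resp->operation = op;
            resp->status    = st;
            r->rsp_prod_pvt++;
    }

    int main(void)
    {
            struct ring r = { .rsp_prod_pvt = 0 };

            make_response(&r, 42, 1, 0);
            printf("rsp id=%llu\n", (unsigned long long)r.slots[0].id);
            return 0;
    }
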
+ 6 - 20
drivers/block/xen-blkback/common.h

@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
 struct blkif_common_request {
 	char dummy;
 };
-struct blkif_common_response {
-	char dummy;
-};
+
+/* i386 protocol version */
 
 struct blkif_x86_32_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
 	} u;
 } __attribute__((__packed__));
 
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
-	uint64_t        id;              /* copied from request */
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_???       */
-};
-#pragma pack(pop)
 /* x86_64 protocol version */
 
 struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
 	} u;
 } __attribute__((__packed__));
 
-struct blkif_x86_64_response {
-	uint64_t       __attribute__((__aligned__(8))) id;
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_???       */
-};
-
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-		  struct blkif_common_response);
+		  struct blkif_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-		  struct blkif_x86_32_response);
+		  struct blkif_response __packed);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-		  struct blkif_x86_64_response);
+		  struct blkif_response);
 
 union blkif_back_rings {
 	struct blkif_back_ring        native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
 
 	wait_queue_head_t	wq;
 	atomic_t		inflight;
+	bool			active;
 	/* One thread per blkif ring. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;

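The header now feeds one struct blkif_response to all three DEFINE_RING_TYPES() instances, with a __packed variant for the 32-bit guest ABI. Packing matters because the 64-bit id would otherwise force alignment padding and change the slot layout a 32-bit guest expects; a tiny sketch that just prints the two sizes, with the field layout copied from the response above:

    #include <stdint.h>
    #include <stdio.h>

    struct resp_natural {
            uint64_t id;            /* copied from request */
            uint8_t  operation;     /* copied from request */
            int16_t  status;        /* BLKIF_RSP_???       */
    };

    struct __attribute__((__packed__)) resp_packed {
            uint64_t id;
            uint8_t  operation;
            int16_t  status;
    };

    int main(void)
    {
            /* on x86-64: 16 bytes with padding vs. 11 bytes packed */
            printf("natural: %zu\n", sizeof(struct resp_natural));
            printf("packed:  %zu\n", sizeof(struct resp_packed));
            return 0;
    }
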
+ 8 - 7
drivers/block/xen-blkback/xenbus.c

@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
 		init_waitqueue_head(&ring->shutdown_wq);
 		ring->blkif = blkif;
 		ring->st_print = jiffies;
-		xen_blkif_get(blkif);
+		ring->active = true;
 	}
 
 	return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		struct xen_blkif_ring *ring = &blkif->rings[r];
 		unsigned int i = 0;
 
+		if (!ring->active)
+			continue;
+
 		if (ring->xenblkd) {
 			kthread_stop(ring->xenblkd);
 			wake_up(&ring->shutdown_wq);
-			ring->xenblkd = NULL;
 		}
 
 		/* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		BUG_ON(ring->free_pages_num != 0);
 		BUG_ON(ring->persistent_gnt_c != 0);
 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-		xen_blkif_put(blkif);
+		ring->active = false;
 	}
 	blkif->nr_ring_pages = 0;
 	/*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-
-	xen_blkif_disconnect(blkif);
+	WARN_ON(xen_blkif_disconnect(blkif));
 	xen_vbd_free(&blkif->vbd);
+	kfree(blkif->be->mode);
+	kfree(blkif->be);
 
 	/* Make sure everything is drained before shutting down */
 	kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 		xen_blkif_put(be->blkif);
 	}
 
-	kfree(be->mode);
-	kfree(be);
 	return 0;
 }
 

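The per-ring refcount dance is replaced by an active flag, which makes teardown idempotent: xen_blkif_disconnect() can run again (note the WARN_ON around the call in xen_blkif_free()) and skips rings that were never brought up. A compact sketch of that idempotent-teardown pattern, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { bool active; };

    static void ring_setup(struct ring *r)
    {
            r->active = true;
    }

    static void ring_teardown(struct ring *r)
    {
            if (!r->active)         /* mirrors "if (!ring->active) continue;" */
                    return;
            puts("tearing down ring");
            r->active = false;
    }

    int main(void)
    {
            struct ring r = { false };

            ring_teardown(&r);      /* no-op: never set up */
            ring_setup(&r);
            ring_teardown(&r);      /* runs once */
            ring_teardown(&r);      /* safe to call again */
            return 0;
    }
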
+ 0 - 1
drivers/bus/Kconfig

@@ -121,7 +121,6 @@ config QCOM_EBI2
 config SIMPLE_PM_BUS
 	bool "Simple Power-Managed Bus Driver"
 	depends on OF && PM
-	depends on ARCH_RENESAS || COMPILE_TEST
 	help
 	  Driver for transparent busses that don't need a real driver, but
 	  where the bus controller is part of a PM domain, or under the control

+ 6 - 6
drivers/char/random.c

@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
 		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
 		cp++; crng_init_cnt++; len--;
 	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
 		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	return 1;
 }
 
@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	}
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng == &primary_crng && crng_init < 2) {
 		invalidate_batched_entropy();
 		crng_init = 2;
@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static inline void crng_wait_ready(void)
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock = crng_init < 2;
-	unsigned long flags;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock = crng_init < 2;
-	unsigned long flags;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))

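Both random.c hunks move the wake_up out from under primary_crng.lock, so a waiter woken by the notification can immediately take the lock, and the batched readers sample crng_init exactly once with READ_ONCE(). A rough pthread/stdatomic analogue of that unlock-then-notify ordering; the primitives are userspace stand-ins for the kernel's spinlock and wait queue:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
    static atomic_int crng_init;

    static void init_done(void)
    {
            pthread_mutex_lock(&lock);
            /* ... update generator state under the lock ... */
            atomic_store(&crng_init, 2);
            pthread_mutex_unlock(&lock);    /* drop the lock first ...   */
            pthread_cond_broadcast(&ready); /* ... then wake the waiters */
    }

    static int need_lock(void)
    {
            /* READ_ONCE analogue: one explicit load, never re-read */
            return atomic_load(&crng_init) < 2;
    }

    int main(void)
    {
            init_done();
            printf("use_lock=%d\n", need_lock());
            return 0;
    }
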
+ 1 - 0
drivers/clk/meson/Kconfig

@@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B
 config COMMON_CLK_GXBB
 	bool
 	depends on COMMON_CLK_AMLOGIC
+	select RESET_CONTROLLER
 	help
 	  Support for the clock controller on AmLogic S905 devices, aka gxbb.
 	  Say Y if you want peripherals and CPU frequency scaling to work.

+ 1 - 0
drivers/clk/sunxi-ng/Kconfig

@@ -156,6 +156,7 @@ config SUN8I_R_CCU
 	bool "Support for Allwinner SoCs' PRCM CCUs"
 	bool "Support for Allwinner SoCs' PRCM CCUs"
 	select SUNXI_CCU_DIV
 	select SUNXI_CCU_DIV
 	select SUNXI_CCU_GATE
 	select SUNXI_CCU_GATE
+	select SUNXI_CCU_MP
 	default MACH_SUN8I || (ARCH_SUNXI && ARM64)
 	default MACH_SUN8I || (ARCH_SUNXI && ARM64)
 
 
 endif
 endif

+ 3 - 1
drivers/clk/sunxi-ng/ccu-sun50i-a64.h

@@ -31,7 +31,9 @@
 #define CLK_PLL_VIDEO0_2X		8
 #define CLK_PLL_VE			9
 #define CLK_PLL_DDR0			10
-#define CLK_PLL_PERIPH0			11
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X		12
 #define CLK_PLL_PERIPH1			13
 #define CLK_PLL_PERIPH1_2X		14

+ 1 - 1
drivers/clk/sunxi-ng/ccu-sun5i.c

@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk,	"ahb-ss",	"ahb",
 static SUNXI_CCU_GATE(ahb_dma_clk,	"ahb-dma",	"ahb",
 		      0x060, BIT(6), 0);
 static SUNXI_CCU_GATE(ahb_bist_clk,	"ahb-bist",	"ahb",
-		      0x060, BIT(6), 0);
+		      0x060, BIT(7), 0);
 static SUNXI_CCU_GATE(ahb_mmc0_clk,	"ahb-mmc0",	"ahb",
 		      0x060, BIT(8), 0);
 static SUNXI_CCU_GATE(ahb_mmc1_clk,	"ahb-mmc1",	"ahb",

+ 1 - 1
drivers/clk/sunxi-ng/ccu-sun6i-a31.c

@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
 				 0x12c, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
-				 0x12c, 0, 4, 24, 3, BIT(31),
+				 0x130, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
 static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",

+ 3 - 1
drivers/clk/sunxi-ng/ccu-sun8i-h3.h

@@ -29,7 +29,9 @@
 #define CLK_PLL_VIDEO		6
 #define CLK_PLL_VE		7
 #define CLK_PLL_DDR		8
-#define CLK_PLL_PERIPH0		9
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X	10
 #define CLK_PLL_GPU		11
 #define CLK_PLL_PERIPH1		12

+ 1 - 1
drivers/clk/sunxi-ng/ccu-sun8i-v3s.c

@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
 	[RST_BUS_EMAC]		=  { 0x2c0, BIT(17) },
 	[RST_BUS_HSTIMER]	=  { 0x2c0, BIT(19) },
 	[RST_BUS_SPI0]		=  { 0x2c0, BIT(20) },
-	[RST_BUS_OTG]		=  { 0x2c0, BIT(23) },
+	[RST_BUS_OTG]		=  { 0x2c0, BIT(24) },
 	[RST_BUS_EHCI0]		=  { 0x2c0, BIT(26) },
 	[RST_BUS_OHCI0]		=  { 0x2c0, BIT(29) },
 

+ 2 - 2
drivers/clocksource/arm_arch_timer.c

@@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
 		return 0;
 	}
 
-	rate = readl_relaxed(frame + CNTFRQ);
+	rate = readl_relaxed(base + CNTFRQ);
 
-	iounmap(frame);
+	iounmap(base);
 
 	return rate;
 }

+ 1 - 0
drivers/clocksource/cadence_ttc_timer.c

@@ -18,6 +18,7 @@
 #include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/slab.h>

+ 1 - 0
drivers/clocksource/timer-sun5i.c

@@ -12,6 +12,7 @@
 
 #include <linux/clk.h>
 #include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>

+ 3 - 3
drivers/gpio/gpio-mvebu.c

@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
 	u32 set;
 
 	if (!of_device_is_compatible(mvchip->chip.of_node,
-				     "marvell,armada-370-xp-gpio"))
+				     "marvell,armada-370-gpio"))
 		return 0;
 
 	if (IS_ERR(mvchip->clk))
@@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
 		.data	    = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
 	},
 	{
-		.compatible = "marvell,armada-370-xp-gpio",
+		.compatible = "marvell,armada-370-gpio",
 		.data	    = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
 	},
 	{
@@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
 						 mvchip);
 	}
 
-	/* Armada 370/XP has simple PWM support for GPIO lines */
+	/* Some MVEBU SoCs have simple PWM support for GPIO lines */
 	if (IS_ENABLED(CONFIG_PWM))
 		return mvebu_pwm_probe(pdev, mvchip, id);
 

+ 4 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c

@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
 			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
 			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
 				 adev->clock.default_dispclk / 100);
 				 adev->clock.default_dispclk / 100);
 			adev->clock.default_dispclk = 60000;
 			adev->clock.default_dispclk = 60000;
+		} else if (adev->clock.default_dispclk <= 60000) {
+			DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+				 adev->clock.default_dispclk / 100);
+			adev->clock.default_dispclk = 62500;
 		}
 		}
 		adev->clock.dp_extclk =
 		adev->clock.dp_extclk =
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);

+ 1 - 0
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -475,6 +475,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	/* Vega 10 */
 	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},

+ 2 - 2
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c

@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
 	memset(&args, 0, sizeof(args));
 
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
 {
 	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
 	memset(&args, 0, sizeof(args));
 

+ 2 - 9
drivers/gpu/drm/arm/hdlcd_crtc.c

@@ -261,21 +261,14 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
 {
 	struct drm_framebuffer *fb = plane->state->fb;
 	struct hdlcd_drm_private *hdlcd;
-	struct drm_gem_cma_object *gem;
-	u32 src_x, src_y, dest_h;
+	u32 dest_h;
 	dma_addr_t scanout_start;
 
 	if (!fb)
 		return;
 
-	src_x = plane->state->src.x1 >> 16;
-	src_y = plane->state->src.y1 >> 16;
 	dest_h = drm_rect_height(&plane->state->dst);
-	gem = drm_fb_cma_get_gem_obj(fb, 0);
-
-	scanout_start = gem->paddr + fb->offsets[0] +
-			src_y * fb->pitches[0] +
-			src_x *	fb->format->cpp[0];
+	scanout_start = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
 
 	hdlcd = plane->dev->dev_private;
 	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);

+ 9 - 0
drivers/gpu/drm/arm/hdlcd_drv.c

@@ -297,6 +297,9 @@ static int hdlcd_drm_bind(struct device *dev)
 	if (ret)
 		goto err_free;
 
+	/* Set the CRTC's port so that the encoder component can find it */
+	hdlcd->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
+
 	ret = component_bind_all(dev, drm);
 	if (ret) {
 		DRM_ERROR("Failed to bind all components\n");
@@ -340,11 +343,14 @@ err_register:
 	}
 err_fbdev:
 	drm_kms_helper_poll_fini(drm);
+	drm_vblank_cleanup(drm);
 err_vblank:
 	pm_runtime_disable(drm->dev);
 err_pm_active:
 	component_unbind_all(dev, drm);
 err_unload:
+	of_node_put(hdlcd->crtc.port);
+	hdlcd->crtc.port = NULL;
 	drm_irq_uninstall(drm);
 	of_reserved_mem_device_release(drm->dev);
 err_free:
@@ -367,6 +373,9 @@ static void hdlcd_drm_unbind(struct device *dev)
 	}
 	drm_kms_helper_poll_fini(drm);
 	component_unbind_all(dev, drm);
+	of_node_put(hdlcd->crtc.port);
+	hdlcd->crtc.port = NULL;
+	drm_vblank_cleanup(drm);
 	pm_runtime_get_sync(drm->dev);
 	drm_irq_uninstall(drm);
 	pm_runtime_put_sync(drm->dev);

+ 20 - 18
drivers/gpu/drm/drm_connector.c

@@ -1293,21 +1293,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	if (!connector)
 		return -ENOENT;
 
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	encoder = drm_connector_get_encoder(connector);
-	if (encoder)
-		out_resp->encoder_id = encoder->base.id;
-	else
-		out_resp->encoder_id = 0;
-
-	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
-			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
-			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
-			&out_resp->count_props);
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
-	if (ret)
-		goto out_unref;
-
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
 		if (connector->encoder_ids[i] != 0)
 			encoders_count++;
@@ -1320,7 +1305,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 				if (put_user(connector->encoder_ids[i],
 					     encoder_ptr + copied)) {
 					ret = -EFAULT;
-					goto out_unref;
+					goto out;
 				}
 				copied++;
 			}
@@ -1364,15 +1349,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
 				ret = -EFAULT;
+				mutex_unlock(&dev->mode_config.mutex);
+
 				goto out;
 			}
 			copied++;
 		}
 	}
 	out_resp->count_modes = mode_count;
-out:
 	mutex_unlock(&dev->mode_config.mutex);
-out_unref:
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	encoder = drm_connector_get_encoder(connector);
+	if (encoder)
+		out_resp->encoder_id = encoder->base.id;
+	else
+		out_resp->encoder_id = 0;
+
+	/* Only grab properties after probing, to make sure EDID and other
+	 * properties reflect the latest status. */
+	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+			&out_resp->count_props);
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+out:
 	drm_connector_put(connector);
 
 	return ret;

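The getconnector reordering makes the property snapshot happen after the mode query, so anything the probe refreshed (EDID-derived properties, tile info) is what userspace reads back, and the two error labels collapse into one. The control flow, reduced to a sketch with illustrative names:

    #include <stdio.h>

    struct connector { int n_modes; int edid_prop; };

    static void fill_modes(struct connector *c)
    {
            c->n_modes = 3;
            c->edid_prop = 42;      /* probing may refresh properties */
    }

    static void getconnector(struct connector *c)
    {
            fill_modes(c);                  /* 1: probe and copy modes */
            printf("props: edid=%d\n",      /* 2: snapshot properties  */
                   c->edid_prop);           /*    only after the probe */
    }

    int main(void)
    {
            struct connector c = { 0, 0 };

            getconnector(&c);
            return 0;
    }
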
+ 10 - 1
drivers/gpu/drm/i915/Makefile

@@ -129,7 +129,16 @@ i915-y += i915_vgpu.o
 
 # perf code
 i915-y += i915_perf.o \
-	  i915_oa_hsw.o
+	  i915_oa_hsw.o \
+	  i915_oa_bdw.o \
+	  i915_oa_chv.o \
+	  i915_oa_sklgt2.o \
+	  i915_oa_sklgt3.o \
+	  i915_oa_sklgt4.o \
+	  i915_oa_bxt.o \
+	  i915_oa_kblgt2.o \
+	  i915_oa_kblgt3.o \
+	  i915_oa_glk.o
 
 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o

+ 4 - 6
drivers/gpu/drm/i915/dvo_ch7xxx.c

@@ -217,9 +217,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 
 	name = ch7xxx_get_id(vendor);
 	if (!name) {
-		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
-				"slave %d.\n",
-			  vendor, adapter->name, dvo->slave_addr);
+		DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n",
+			      vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
@@ -229,9 +228,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 
 	devid = ch7xxx_get_did(device);
 	if (!devid) {
-		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
-				"slave %d.\n",
-			  vendor, adapter->name, dvo->slave_addr);
+		DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n",
+			      device, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 

+ 1 - 1
drivers/gpu/drm/i915/gvt/Makefile

@@ -3,6 +3,6 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
 	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
 
-ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR) -Wall
+ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR)
 i915-y					+= $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
 obj-$(CONFIG_DRM_I915_GVT_KVMGT)	+= $(GVT_DIR)/kvmgt.o

+ 3 - 53
drivers/gpu/drm/i915/gvt/cmd_parser.c

@@ -2414,53 +2414,13 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
 	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
 }
 
-#define GVT_MAX_CMD_LENGTH     20  /* In Dword */
-
-static void trace_cs_command(struct parser_exec_state *s,
-		cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
-{
-	/* This buffer is used by ftrace to store all commands copied from
-	 * guest gma space. Sometimes commands can cross pages, this should
-	 * not be handled in ftrace logic. So this is just used as a
-	 * 'bounce buffer'
-	 */
-	u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
-	int i;
-	u32 cmd_len = cmd_length(s);
-	/* The chosen value of GVT_MAX_CMD_LENGTH are just based on
-	 * following two considerations:
-	 * 1) From observation, most common ring commands is not that long.
-	 *    But there are execeptions. So it indeed makes sence to observe
-	 *    longer commands.
-	 * 2) From the performance and debugging point of view, dumping all
-	 *    contents of very commands is not necessary.
-	 * We mgith shrink GVT_MAX_CMD_LENGTH or remove this trace event in
-	 * future for performance considerations.
-	 */
-	if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
-		gvt_dbg_cmd("cmd length exceed tracing limitation!\n");
-		cmd_len = GVT_MAX_CMD_LENGTH;
-	}
-
-	for (i = 0; i < cmd_len; i++)
-		cmd_trace_buf[i] = cmd_val(s, i);
-
-	trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
-			cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
-			cost_pre_cmd_handler, cost_cmd_handler);
-}
-
 /* call the cmd handler, and advance ip */
 static int cmd_parser_exec(struct parser_exec_state *s)
 {
+	struct intel_vgpu *vgpu = s->vgpu;
 	struct cmd_info *info;
 	u32 cmd;
 	int ret = 0;
-	cycles_t t0, t1, t2;
-	struct parser_exec_state s_before_advance_custom;
-	struct intel_vgpu *vgpu = s->vgpu;
-
-	t0 = get_cycles();
 
 	cmd = cmd_val(s, 0);
 
@@ -2471,13 +2431,10 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 		return -EINVAL;
 	}
 
-	gvt_dbg_cmd("%s\n", info->name);
-
 	s->info = info;
 
-	t1 = get_cycles();
-
-	s_before_advance_custom = *s;
+	trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+			  cmd_length(s), s->buf_type);
 
 	if (info->handler) {
 		ret = info->handler(s);
@@ -2486,9 +2443,6 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 			return ret;
 		}
 	}
-	t2 = get_cycles();
-
-	trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
 
 	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
 		ret = cmd_advance_default(s);
@@ -2522,8 +2476,6 @@ static int command_scan(struct parser_exec_state *s,
 	gma_tail = rb_start + rb_tail;
 	gma_bottom = rb_start +  rb_len;
 
-	gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
-
 	while (s->ip_gma != gma_tail) {
 		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
 			if (!(s->ip_gma >= rb_start) ||
@@ -2552,8 +2504,6 @@ static int command_scan(struct parser_exec_state *s,
 		}
 	}
 
-	gvt_dbg_cmd("scan_end\n");
-
 	return ret;
 }
 

+ 23 - 33
drivers/gpu/drm/i915/gvt/execlist.c

@@ -708,53 +708,43 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
-	struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
-	unsigned long valid_desc_bitmap = 0;
-	bool emulate_schedule_in = true;
-	int ret;
-	int i;
+	struct execlist_ctx_descriptor_format desc[2];
+	int i, ret;
 
-	memset(valid_desc, 0, sizeof(valid_desc));
+	desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
+	desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
 
-	desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
-	desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+	if (!desc[0].valid) {
+		gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
+		goto inv_desc;
+	}
 
-	for (i = 0; i < 2; i++) {
-		if (!desc[i]->valid)
+	for (i = 0; i < ARRAY_SIZE(desc); i++) {
+		if (!desc[i].valid)
 			continue;
-
-		if (!desc[i]->privilege_access) {
+		if (!desc[i].privilege_access) {
 			gvt_vgpu_err("unexpected GGTT elsp submission\n");
-			return -EINVAL;
+			goto inv_desc;
 		}
-
-		/* TODO: add another guest context checks here. */
-		set_bit(i, &valid_desc_bitmap);
-		valid_desc[i] = *desc[i];
-	}
-
-	if (!valid_desc_bitmap) {
-		gvt_vgpu_err("no valid desc in a elsp submission\n");
-		return -EINVAL;
-	}
-
-	if (!test_bit(0, (void *)&valid_desc_bitmap) &&
-			test_bit(1, (void *)&valid_desc_bitmap)) {
-		gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
-		return -EINVAL;
 	}
 
 	/* submit workload */
-	for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
-		ret = submit_context(vgpu, ring_id, &valid_desc[i],
-				emulate_schedule_in);
+	for (i = 0; i < ARRAY_SIZE(desc); i++) {
+		if (!desc[i].valid)
+			continue;
+		ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
 		if (ret) {
-			gvt_vgpu_err("fail to schedule workload\n");
+			gvt_vgpu_err("failed to submit desc %d\n", i);
 			return ret;
 		}
-		emulate_schedule_in = false;
 	}
+
 	return 0;
+
+inv_desc:
+	gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
+		     desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
+	return -EINVAL;
 }
 
 static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)

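The rewritten submission path copies both ELSP descriptors by value, insists that descriptor 0 is valid, and submits each valid descriptor with schedule-in emulation only for slot 0 (the i == 0 argument). A stripped-down sketch of that validate-then-submit flow; struct desc and submit() are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    struct desc { bool valid; };

    static int submit(int slot, bool schedule_in)
    {
            printf("submit slot %d, schedule_in=%d\n", slot, schedule_in);
            return 0;
    }

    static int submit_execlist(struct desc desc[2])
    {
            if (!desc[0].valid)             /* desc 0 must always be valid */
                    return -1;

            for (int i = 0; i < 2; i++) {
                    if (!desc[i].valid)
                            continue;
                    if (submit(i, i == 0))  /* schedule-in only for slot 0 */
                            return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct desc d[2] = { { true }, { false } };

            return submit_execlist(d);
    }
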
+ 2 - 7
drivers/gpu/drm/i915/gvt/firmware.c

@@ -102,13 +102,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
 	p = firmware + h->mmio_offset;
 
-	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
-		int j;
-
-		for (j = 0; j < e->length; j += 4)
-			*(u32 *)(p + e->offset + j) =
-				I915_READ_NOTRACE(_MMIO(e->offset + j));
-	}
+	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
+		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 

+ 9 - 6
drivers/gpu/drm/i915/gvt/gtt.c

@@ -244,15 +244,19 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 	return readq(addr);
 }
 
+static void gtt_invalidate(struct drm_i915_private *dev_priv)
+{
+	mmio_hw_access_pre(dev_priv);
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	mmio_hw_access_post(dev_priv);
+}
+
 static void write_pte64(struct drm_i915_private *dev_priv,
 		unsigned long index, u64 pte)
 {
 	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
 
 	writeq(pte, addr);
-
-	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
 static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
@@ -1849,6 +1853,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	}
 
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
+	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
 	return 0;
 }
@@ -2301,8 +2306,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 	u32 num_entries;
 	struct intel_gvt_gtt_entry e;
 
-	intel_runtime_pm_get(dev_priv);
-
 	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
 	e.type = GTT_TYPE_GGTT_PTE;
 	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
@@ -2318,7 +2321,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 	for (offset = 0; offset < num_entries; offset++)
 		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 
-	intel_runtime_pm_put(dev_priv);
+	gtt_invalidate(dev_priv);
 }
 
 /**

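gtt.c stops flushing after every PTE write: write_pte64() becomes a plain writeq(), and a single gtt_invalidate() runs per update (or once after the whole reset loop), bracketed by the runtime-PM helpers. The batching idea in miniature, with illustrative stand-ins for the MMIO accessors:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ggtt[4];

    static void write_pte(unsigned long idx, uint64_t pte)
    {
            ggtt[idx] = pte;        /* plain write, no flush per entry */
    }

    static void gtt_invalidate(void)
    {
            puts("GFX_FLSH_CNTL write");    /* one flush for the batch */
    }

    int main(void)
    {
            for (unsigned long i = 0; i < 4; i++)
                    write_pte(i, 0x1000 + i);
            gtt_invalidate();
            return 0;
    }
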
+ 4 - 2
drivers/gpu/drm/i915/gvt/gvt.c

@@ -147,7 +147,9 @@ static int gvt_service_thread(void *data)
 			mutex_unlock(&gvt->lock);
 		}
 
-		if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+		if (test_bit(INTEL_GVT_REQUEST_SCHED,
+				(void *)&gvt->service_request) ||
+			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
 					(void *)&gvt->service_request)) {
 			intel_gvt_schedule(gvt);
 		}
@@ -244,7 +246,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	gvt_dbg_core("init gvt device\n");
 
 	idr_init(&gvt->vgpu_idr);
-
+	spin_lock_init(&gvt->scheduler.mmio_context_lock);
 	mutex_init(&gvt->lock);
 	gvt->dev_priv = dev_priv;
 

+ 97 - 3
drivers/gpu/drm/i915/gvt/gvt.h

@@ -165,7 +165,6 @@ struct intel_vgpu {
 	struct list_head workload_q_head[I915_NUM_ENGINES];
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
-	ktime_t last_ctx_submit_time;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
 
@@ -196,11 +195,27 @@ struct intel_gvt_fence {
 	unsigned long vgpu_allocated_fence_num;
 };
 
-#define INTEL_GVT_MMIO_HASH_BITS 9
+#define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
-	u32 *mmio_attribute;
+	u8 *mmio_attribute;
+/* Register contains RO bits */
+#define F_RO		(1 << 0)
+/* Register contains graphics address */
+#define F_GMADR		(1 << 1)
+/* Mode mask registers with high 16 bits as the mask bits */
+#define F_MODE_MASK	(1 << 2)
+/* This reg can be accessed by GPU commands */
+#define F_CMD_ACCESS	(1 << 3)
+/* This reg has been accessed by a VM */
+#define F_ACCESSED	(1 << 4)
+/* This reg has been accessed through GPU commands */
+#define F_CMD_ACCESSED	(1 << 5)
+/* This reg could be accessed by unaligned address */
+#define F_UNALIGN	(1 << 6)
+
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
+	unsigned int num_tracked_mmio;
 };
 
 struct intel_gvt_firmware {
@@ -257,7 +272,12 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
 
 enum {
 	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+
+	/* Scheduling trigger by timer */
 	INTEL_GVT_REQUEST_SCHED = 1,
+
+	/* Scheduling trigger by event */
+	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
 };
 
 static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -473,6 +493,80 @@ enum {
 	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
 };
 
+static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+{
+	intel_runtime_pm_get(dev_priv);
+}
+
+static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+{
+	intel_runtime_pm_put(dev_priv);
+}
+
+/**
+ * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_accessed(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline bool intel_gvt_mmio_is_cmd_access(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
+}
+
+/**
+ * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline bool intel_gvt_mmio_is_unalign(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
+}
+
+/**
+ * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_cmd_accessed(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
+}
+
+/**
+ * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
+ *
+ */
+static inline bool intel_gvt_mmio_has_mode_mask(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
+}
+
+#include "trace.h"
 #include "mpt.h"
 
 #endif

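The attribute table shrinks from a u32 to a u8 per tracked register, indexed by offset >> 2, with the F_* bits and their inline helpers living next to it. The indexing scheme, as a small standalone sketch (MMIO_SIZE is an illustrative value):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define F_RO            (1 << 0)
    #define F_CMD_ACCESS    (1 << 3)
    #define MMIO_SIZE       (2 * 1024 * 1024)       /* illustrative */

    int main(void)
    {
            /* one byte per 4-byte register: mmio_size / 4 entries */
            uint8_t *attr = calloc(MMIO_SIZE / 4, sizeof(*attr));
            unsigned int offset = 0x2178;

            attr[offset >> 2] |= F_CMD_ACCESS;
            printf("cmd access: %d\n", !!(attr[offset >> 2] & F_CMD_ACCESS));
            free(attr);
            return 0;
    }
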
+ 177 - 126
drivers/gpu/drm/i915/gvt/handlers.c

@@ -47,21 +47,6 @@
 #define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
 #define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
 #define PCH_PP_DIVISOR _MMIO(0xc7210)
 #define PCH_PP_DIVISOR _MMIO(0xc7210)
 
 
-/* Register contains RO bits */
-#define F_RO		(1 << 0)
-/* Register contains graphics address */
-#define F_GMADR		(1 << 1)
-/* Mode mask registers with high 16 bits as the mask bits */
-#define F_MODE_MASK	(1 << 2)
-/* This reg can be accessed by GPU commands */
-#define F_CMD_ACCESS	(1 << 3)
-/* This reg has been accessed by a VM */
-#define F_ACCESSED	(1 << 4)
-/* This reg has been accessed through GPU commands */
-#define F_CMD_ACCESSED	(1 << 5)
-/* This reg could be accessed by unaligned address */
-#define F_UNALIGN	(1 << 6)
-
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 {
 {
 	if (IS_BROADWELL(gvt->dev_priv))
 	if (IS_BROADWELL(gvt->dev_priv))
@@ -92,11 +77,22 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
 	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
 }
 }
 
 
+static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
+						  unsigned int offset)
+{
+	struct intel_gvt_mmio_info *e;
+
+	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
+		if (e->offset == offset)
+			return e;
+	}
+	return NULL;
+}
+
 static int new_mmio_info(struct intel_gvt *gvt,
 static int new_mmio_info(struct intel_gvt *gvt,
-		u32 offset, u32 flags, u32 size,
+		u32 offset, u8 flags, u32 size,
 		u32 addr_mask, u32 ro_mask, u32 device,
 		u32 addr_mask, u32 ro_mask, u32 device,
-		int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
-		int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
+		gvt_mmio_func read, gvt_mmio_func write)
 {
 {
 	struct intel_gvt_mmio_info *info, *p;
 	struct intel_gvt_mmio_info *info, *p;
 	u32 start, end, i;
 	u32 start, end, i;
@@ -116,13 +112,11 @@ static int new_mmio_info(struct intel_gvt *gvt,
 			return -ENOMEM;
 			return -ENOMEM;
 
 
 		info->offset = i;
 		info->offset = i;
-		p = intel_gvt_find_mmio_info(gvt, info->offset);
+		p = find_mmio_info(gvt, info->offset);
 		if (p)
 		if (p)
 			gvt_err("dup mmio definition offset %x\n",
 			gvt_err("dup mmio definition offset %x\n",
 				info->offset);
 				info->offset);
-		info->size = size;
-		info->length = (i + 4) < end ? 4 : (end - i);
-		info->addr_mask = addr_mask;
+
 		info->ro_mask = ro_mask;
 		info->ro_mask = ro_mask;
 		info->device = device;
 		info->device = device;
 		info->read = read ? read : intel_vgpu_default_mmio_read;
 		info->read = read ? read : intel_vgpu_default_mmio_read;
@@ -130,6 +124,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
 		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
 		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
 		INIT_HLIST_NODE(&info->node);
 		INIT_HLIST_NODE(&info->node);
 		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
 		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+		gvt->mmio.num_tracked_mmio++;
 	}
 	}
 	return 0;
 	return 0;
 }
 }
@@ -209,6 +204,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		void *p_data, unsigned int bytes)
 		void *p_data, unsigned int bytes)
 {
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	unsigned int fence_num = offset_to_fence_num(off);
 	unsigned int fence_num = offset_to_fence_num(off);
 	int ret;
 	int ret;
 
 
@@ -217,8 +213,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		return ret;
 		return ret;
 	write_vreg(vgpu, off, p_data, bytes);
 	write_vreg(vgpu, off, p_data, bytes);
 
 
+	mmio_hw_access_pre(dev_priv);
 	intel_vgpu_write_fence(vgpu, fence_num,
 	intel_vgpu_write_fence(vgpu, fence_num,
 			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
 			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
+	mmio_hw_access_post(dev_priv);
 	return 0;
 	return 0;
 }
 }
 
 
@@ -300,6 +298,9 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 
 	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
 	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
 
 
+	/* sw will wait for the device to ack the reset request */
+	 vgpu_vreg(vgpu, offset) = 0;
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -1265,7 +1266,10 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 	}
 	}
 	write_vreg(vgpu, offset, p_data, bytes);
 	write_vreg(vgpu, offset, p_data, bytes);
 	/* TRTTE is not per-context */
 	/* TRTTE is not per-context */
+
+	mmio_hw_access_pre(dev_priv);
 	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
 	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+	mmio_hw_access_post(dev_priv);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -1278,7 +1282,9 @@ static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 
 	if (val & 1) {
 	if (val & 1) {
 		/* unblock hw logic */
 		/* unblock hw logic */
+		mmio_hw_access_pre(dev_priv);
 		I915_WRITE(_MMIO(offset), val);
 		I915_WRITE(_MMIO(offset), val);
+		mmio_hw_access_post(dev_priv);
 	}
 	}
 	write_vreg(vgpu, offset, p_data, bytes);
 	write_vreg(vgpu, offset, p_data, bytes);
 	return 0;
 	return 0;
@@ -1415,7 +1421,20 @@ static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
 {
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 
+	mmio_hw_access_pre(dev_priv);
 	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
 	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+	mmio_hw_access_post(dev_priv);
+	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int instdone_mmio_read(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	mmio_hw_access_pre(dev_priv);
+	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+	mmio_hw_access_post(dev_priv);
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
 }
 
 
@@ -1434,7 +1453,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 
 	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
 	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
 	if (execlist->elsp_dwords.index == 3) {
-		vgpu->last_ctx_submit_time = ktime_get();
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		if(ret)
 		if(ret)
 			gvt_vgpu_err("fail submit workload on ring %d\n",
 			gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -1603,6 +1621,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 #undef RING_REG
 
 
+#define RING_REG(base) (base + 0x6c)
+	MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
+	MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
+#undef RING_REG
+	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
+
 	MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
 	MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
@@ -1779,10 +1803,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
 
 
-	MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
-
 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
 	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
 	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
@@ -2187,7 +2207,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
-	MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
+	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
 	MMIO_D(ECOBUS, D_ALL);
 	MMIO_D(ECOBUS, D_ALL);
 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
 	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
 	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
@@ -2219,22 +2239,19 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
 	MMIO_D(GEN6_PMINTRMSK, D_ALL);
 	MMIO_D(GEN6_PMINTRMSK, D_ALL);
-	MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
-	MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
+	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
 
 
 	MMIO_D(RSTDBYCTL, D_ALL);
 	MMIO_D(RSTDBYCTL, D_ALL);
 
 
 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
 	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
 	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
-	MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
 	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
 
 
-	MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
-
 	MMIO_D(TILECTL, D_ALL);
 	MMIO_D(TILECTL, D_ALL);
 
 
 	MMIO_D(GEN6_UCGCTL1, D_ALL);
 	MMIO_D(GEN6_UCGCTL1, D_ALL);
@@ -2242,7 +2259,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 
 	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
 	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
 
 
-	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
 	MMIO_D(0x13812c, D_ALL);
 	MMIO_D(0x13812c, D_ALL);
 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2321,14 +2337,13 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x1a054, D_ALL);
 	MMIO_D(0x1a054, D_ALL);
 
 
 	MMIO_D(0x44070, D_ALL);
 	MMIO_D(0x44070, D_ALL);
-	MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);

-	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
-	MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
 	MMIO_D(0x2b00, D_BDW_PLUS);
 	MMIO_D(0x2360, D_BDW_PLUS);
 	MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -2766,7 +2781,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x72380, D_SKL_PLUS);
 	MMIO_D(0x7039c, D_SKL_PLUS);

-	MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
 	MMIO_D(0x8f074, D_SKL | D_KBL);
 	MMIO_D(0x8f004, D_SKL | D_KBL);
 	MMIO_D(0x8f034, D_SKL | D_KBL);
@@ -2840,26 +2854,36 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }

-/**
- * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
- * @gvt: GVT device
- * @offset: register offset
- *
- * This function is used to find the MMIO information entry from hash table
- *
- * Returns:
- * pointer to MMIO information entry, NULL if not exists
- */
-struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
-	unsigned int offset)
-{
-	struct intel_gvt_mmio_info *e;
+/* Special MMIO blocks. */
+static struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t   offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+} gvt_mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
-	WARN_ON(!IS_ALIGNED(offset, 4));
+static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
+					      unsigned int offset)
+{
+	unsigned long device = intel_gvt_get_device_type(gvt);
+	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	int i;
-	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
-		if (e->offset == offset)
-			return e;
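+	/* match on the device type first, then on the offset range */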
+	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+		if (!(device & block->device))
+			continue;
+		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
+		    offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
+			return block;
 	}
 	return NULL;
 }
@@ -2899,9 +2923,10 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
 	int ret;

-	gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
+	gvt->mmio.mmio_attribute = vzalloc(size);
 	if (!gvt->mmio.mmio_attribute)
 		return -ENOMEM;

@@ -2922,77 +2947,15 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		if (ret)
 			goto err;
 	}
+
+	gvt_dbg_mmio("traced %u virtual mmio registers\n",
+		     gvt->mmio.num_tracked_mmio);
 	return 0;
 err:
 	intel_gvt_clean_mmio_info(gvt);
 	return ret;
 }

-/**
- * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
-{
-	gvt->mmio.mmio_attribute[offset >> 2] |=
-		F_ACCESSED;
-}
-
-/**
- * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_CMD_ACCESS;
-}
-
-/**
- * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_UNALIGN;
-}
-
-/**
- * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
- * @gvt: a GVT device
- * @offset: register offset
- *
- */
-void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
-		unsigned int offset)
-{
-	gvt->mmio.mmio_attribute[offset >> 2] |=
-		F_CMD_ACCESSED;
-}
-
-/**
- * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
- * @gvt: a GVT device
- * @offset: register offset
- *
- * Returns:
- * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
- *
- */
-bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
-{
-	return gvt->mmio.mmio_attribute[offset >> 2] &
-		F_MODE_MASK;
-}

 /**
  * intel_vgpu_default_mmio_read - default MMIO read handler
@@ -3044,3 +3007,91 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 {
 	return in_whitelist(offset);
 }
+
+/**
+ * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
+ * @vgpu: a vGPU
+ * @offset: register offset
+ * @pdata: data buffer
+ * @bytes: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
+			   void *pdata, unsigned int bytes, bool is_read)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_mmio_info *mmio_info;
+	struct gvt_mmio_block *mmio_block;
+	gvt_mmio_func func;
+	int ret;
+
+	if (WARN_ON(bytes > 4))
+		return -EINVAL;
+
+	/*
+	 * Handle special MMIO blocks.
+	 */
+	mmio_block = find_mmio_block(gvt, offset);
+	if (mmio_block) {
+		func = is_read ? mmio_block->read : mmio_block->write;
+		if (func)
+			return func(vgpu, offset, pdata, bytes);
+		goto default_rw;
+	}
+
+	/*
+	 * Normal tracked MMIOs.
+	 */
+	mmio_info = find_mmio_info(gvt, offset);
+	if (!mmio_info) {
+		if (!vgpu->mmio.disable_warn_untrack)
+			gvt_vgpu_err("untracked MMIO %08x len %d\n",
+				     offset, bytes);
+		goto default_rw;
+	}
+
+	if (is_read)
+		return mmio_info->read(vgpu, offset, pdata, bytes);
+	else {
+		u64 ro_mask = mmio_info->ro_mask;
+		u32 old_vreg = 0, old_sreg = 0;
+		u64 data = 0;
+
+		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
+			old_vreg = vgpu_vreg(vgpu, offset);
+			old_sreg = vgpu_sreg(vgpu, offset);
+		}
+
+		if (likely(!ro_mask))
+			ret = mmio_info->write(vgpu, offset, pdata, bytes);
+		else if (!~ro_mask) {
+			gvt_vgpu_err("try to write RO reg %x\n", offset);
+			return 0;
+		} else {
+			/* keep the RO bits in the virtual register */
+			memcpy(&data, pdata, bytes);
+			data &= ~ro_mask;
+			data |= vgpu_vreg(vgpu, offset) & ro_mask;
+			ret = mmio_info->write(vgpu, offset, &data, bytes);
+		}
+
+		/* higher 16bits of mode ctl regs are mask bits for change */
+		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
+			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
+
+			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
+					| (vgpu_vreg(vgpu, offset) & mask);
+			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
+					| (vgpu_sreg(vgpu, offset) & mask);
+		}
+	}
+
+	return ret;
+
+default_rw:
+	return is_read ?
+		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
+		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
+}

+ 10 - 10
drivers/gpu/drm/i915/gvt/interrupt.c

@@ -31,6 +31,7 @@

 #include "i915_drv.h"
 #include "gvt.h"
+#include "trace.h"

 /* common offset among interrupt control registers */
 #define regbase_to_isr(base)	(base)
@@ -178,8 +179,8 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
 	u32 imr = *(u32 *)p_data;

-	gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
-		    reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
+	trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg),
+		       (vgpu_vreg(vgpu, reg) ^ imr));

 	vgpu_vreg(vgpu, reg) = imr;

@@ -209,8 +210,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 	u32 ier = *(u32 *)p_data;
 	u32 virtual_ier = vgpu_vreg(vgpu, reg);

-	gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
-		    reg, ier, virtual_ier, virtual_ier ^ ier);
+	trace_write_ir(vgpu->id, "MASTER_IRQ", reg, ier, virtual_ier,
+		       (virtual_ier ^ ier));

 	/*
 	 * GEN8_MASTER_IRQ is a special irq register,
@@ -248,8 +249,8 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	struct intel_gvt_irq_info *info;
 	u32 ier = *(u32 *)p_data;

-	gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
-		    reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
+	trace_write_ir(vgpu->id, "IER", reg, ier, vgpu_vreg(vgpu, reg),
+		       (vgpu_vreg(vgpu, reg) ^ ier));

 	vgpu_vreg(vgpu, reg) = ier;

@@ -285,8 +286,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
 		iir_to_regbase(reg));
 	u32 iir = *(u32 *)p_data;

-	gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
-		    reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
+	trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
+		       (vgpu_vreg(vgpu, reg) ^ iir));

 	if (WARN_ON(!info))
 		return -EINVAL;
@@ -411,8 +412,7 @@ static void propagate_event(struct intel_gvt_irq *irq,

 	if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
 					regbase_to_imr(reg_base)))) {
-		gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
-				bit, irq_name[event], vgpu->id);
+		trace_propagate_event(vgpu->id, irq_name[event], bit);
 		set_bit(bit, (void *)&vgpu_vreg(vgpu,
 					regbase_to_iir(reg_base)));
 	}

+ 25 - 96
drivers/gpu/drm/i915/gvt/mmio.c

@@ -123,7 +123,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_mmio_info *mmio;
 	unsigned int offset = 0;
 	int ret = -EINVAL;

@@ -187,32 +186,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 			goto err;
 	}

-	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-	if (mmio) {
-		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
-			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
-				goto err;
-			if (WARN_ON(mmio->offset != offset))
-				goto err;
-		}
-		ret = mmio->read(vgpu, offset, p_data, bytes);
-	} else {
-		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
-
-		if (!vgpu->mmio.disable_warn_untrack) {
-			gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
-				offset, bytes, *(u32 *)p_data);
-
-			if (offset == 0x206c) {
-				gvt_vgpu_err("------------------------------------------\n");
-				gvt_vgpu_err("likely triggers a gfx reset\n");
-				gvt_vgpu_err("------------------------------------------\n");
-				vgpu->mmio.disable_warn_untrack = true;
-			}
-		}
-	}
-
-	if (ret)
+	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
+	if (ret < 0)
 		goto err;

 	intel_gvt_mmio_set_accessed(gvt, offset);
@@ -239,9 +214,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_gvt_mmio_info *mmio;
 	unsigned int offset = 0;
-	u32 old_vreg = 0, old_sreg = 0;
 	int ret = -EINVAL;

 	if (vgpu->failsafe) {
@@ -296,66 +269,10 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		return ret;
 	}

-	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-	if (!mmio && !vgpu->mmio.disable_warn_untrack)
-		gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
-				vgpu->id, offset, bytes, *(u32 *)p_data);
-
-	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
-		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
-			goto err;
-	}
-
-	if (mmio) {
-		u64 ro_mask = mmio->ro_mask;
-
-		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
-			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
-				goto err;
-			if (WARN_ON(mmio->offset != offset))
-				goto err;
-		}
-
-		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
-			old_vreg = vgpu_vreg(vgpu, offset);
-			old_sreg = vgpu_sreg(vgpu, offset);
-		}
-
-		if (!ro_mask) {
-			ret = mmio->write(vgpu, offset, p_data, bytes);
-		} else {
-			/* Protect RO bits like HW */
-			u64 data = 0;
-
-			/* all register bits are RO. */
-			if (ro_mask == ~(u64)0) {
-				gvt_vgpu_err("try to write RO reg %x\n",
-					offset);
-				ret = 0;
-				goto out;
-			}
-			/* keep the RO bits in the virtual register */
-			memcpy(&data, p_data, bytes);
-			data &= ~mmio->ro_mask;
-			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
-			ret = mmio->write(vgpu, offset, &data, bytes);
-		}
-
-		/* higher 16bits of mode ctl regs are mask bits for change */
-		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
-			u32 mask = vgpu_vreg(vgpu, offset) >> 16;
-
-			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
-				| (vgpu_vreg(vgpu, offset) & mask);
-			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
-				| (vgpu_sreg(vgpu, offset) & mask);
-		}
-	} else
-		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
-				bytes);
-	if (ret)
+	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
+	if (ret < 0)
 		goto err;
-out:
+
 	intel_gvt_mmio_set_accessed(gvt, offset);
 	mutex_unlock(&gvt->lock);
 	return 0;
@@ -372,20 +289,32 @@ err:
  * @vgpu: a vGPU
  *
  */
-void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	const struct intel_gvt_device_info *info = &gvt->device_info;
+	void  *mmio = gvt->firmware.mmio;
+
+	if (dmlr) {
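+		/* device model level reset: reload the whole mmio image
+		 * from the firmware snapshot
+		 */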
+		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
+		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
-	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
-	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+		vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+		/* set bits 0:2 (Core C-State) to C0 */
+		vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
-	/* set the bit 0:2(Core C-State ) to C0 */
-	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+		vgpu->mmio.disable_warn_untrack = false;
+	} else {
+#define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
+		/* only reset the engine-related state; registers from 0x44200
+		 * onward (interrupts, including DE, and display mmio) will
+		 * not be touched
+		 */
+		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
+		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
+	}

-	vgpu->mmio.disable_warn_untrack = false;
 }

 /**
@@ -405,7 +334,7 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)

 	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

-	intel_vgpu_reset_mmio(vgpu);
+	intel_vgpu_reset_mmio(vgpu, true);

 	return 0;
 }

+ 16 - 28
drivers/gpu/drm/i915/gvt/mmio.h

@@ -39,36 +39,28 @@
 struct intel_gvt;
 struct intel_vgpu;

-#define D_SNB   (1 << 0)
-#define D_IVB   (1 << 1)
-#define D_HSW   (1 << 2)
-#define D_BDW   (1 << 3)
-#define D_SKL	(1 << 4)
-#define D_KBL	(1 << 5)
+#define D_BDW   (1 << 0)
+#define D_SKL	(1 << 1)
+#define D_KBL	(1 << 2)

 #define D_GEN9PLUS	(D_SKL | D_KBL)
 #define D_GEN8PLUS	(D_BDW | D_SKL | D_KBL)
-#define D_GEN75PLUS	(D_HSW | D_BDW | D_SKL | D_KBL)
-#define D_GEN7PLUS	(D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)

 #define D_SKL_PLUS	(D_SKL | D_KBL)
 #define D_BDW_PLUS	(D_BDW | D_SKL | D_KBL)
-#define D_HSW_PLUS	(D_HSW | D_BDW | D_SKL | D_KBL)
-#define D_IVB_PLUS	(D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
-#define D_PRE_BDW	(D_SNB | D_IVB | D_HSW)
-#define D_PRE_SKL	(D_SNB | D_IVB | D_HSW | D_BDW)
-#define D_ALL		(D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_PRE_SKL	(D_BDW)
+#define D_ALL		(D_BDW | D_SKL | D_KBL)
+
+typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
+			     unsigned int);

 struct intel_gvt_mmio_info {
 	u32 offset;
-	u32 size;
-	u32 length;
-	u32 addr_mask;
 	u64 ro_mask;
 	u32 device;
-	int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
-	int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
+	gvt_mmio_func read;
+	gvt_mmio_func write;
 	u32 addr_range;
 	struct hlist_node node;
 };
@@ -79,8 +71,6 @@ bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);

-struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
-						     unsigned int offset);
 #define INTEL_GVT_MMIO_OFFSET(reg) ({ \
 	typeof(reg) __reg = reg; \
 	u32 *offset = (u32 *)&__reg; \
@@ -88,7 +78,7 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
 })

 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
-void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);

 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
@@ -97,13 +87,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
 				void *p_data, unsigned int bytes);
 int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
 				void *p_data, unsigned int bytes);
-bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
-				  unsigned int offset);
-bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
-void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
-void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
-				     unsigned int offset);
-bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
+
 int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 				 void *p_data, unsigned int bytes);
 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
@@ -111,4 +95,8 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,

 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 					  unsigned int offset);
+
+int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
+			   void *pdata, unsigned int bytes, bool is_read);
+
 #endif

+ 1 - 2
drivers/gpu/drm/i915/gvt/mpt.h

@@ -133,8 +133,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
 	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
 		return -EINVAL;

-	gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
-		    data);
+	trace_inject_msi(vgpu->id, addr, data);

 	ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
 	if (ret)

+ 40 - 8
drivers/gpu/drm/i915/gvt/render.c

@@ -35,6 +35,7 @@

 #include "i915_drv.h"
 #include "gvt.h"
+#include "trace.h"

 struct render_mmio {
 	int ring_id;
@@ -260,7 +261,8 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)

 #define CTX_CONTEXT_CONTROL_VAL	0x03

-void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context) from host to a vgpu. */
+static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct render_mmio *mmio;
@@ -305,14 +307,15 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 		I915_WRITE(mmio->reg, v);
 		POSTING_READ(mmio->reg);

-		gvt_dbg_render("load reg %x old %x new %x\n",
-				i915_mmio_reg_offset(mmio->reg),
-				mmio->value, v);
+		trace_render_mmio(vgpu->id, "load",
+				  i915_mmio_reg_offset(mmio->reg),
+				  mmio->value, v);
 	}
 	handle_tlb_pending_event(vgpu, ring_id);
 }

-void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context) from vgpu to host. */
+static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct render_mmio *mmio;
@@ -346,8 +349,37 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 		I915_WRITE(mmio->reg, v);
 		POSTING_READ(mmio->reg);

-		gvt_dbg_render("restore reg %x old %x new %x\n",
-				i915_mmio_reg_offset(mmio->reg),
-				mmio->value, v);
+		trace_render_mmio(vgpu->id, "restore",
+				  i915_mmio_reg_offset(mmio->reg),
+				  mmio->value, v);
 	}
 }
+
+/**
+ * intel_gvt_switch_mmio - switch the mmio context of a specific engine
+ * @pre: the last vGPU that owned the engine
+ * @next: the vGPU to switch to
+ * @ring_id: specify the engine
+ *
+ * If pre is null, it indicates that the host owns the engine. If next is
+ * null, it indicates that we are switching to a host workload.
+ */
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+			   struct intel_vgpu *next, int ring_id)
+{
+	if (WARN_ON(!pre && !next))
+		return;
+
+	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+		       pre ? "vGPU" : "host", next ? "vGPU" : "host");
+
+	/**
+	 * TODO: Optimize for vGPU to vGPU switch by merging
+	 * switch_mmio_to_host() and switch_mmio_to_vgpu().
+	 */
+	if (pre)
+		switch_mmio_to_host(pre, ring_id);
+
+	if (next)
+		switch_mmio_to_vgpu(next, ring_id);
+}

+ 2 - 2
drivers/gpu/drm/i915/gvt/render.h

@@ -36,8 +36,8 @@
 #ifndef __GVT_RENDER_H__
 #define __GVT_RENDER_H__

-void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+			   struct intel_vgpu *next, int ring_id);
-void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);

 #endif

+ 22 - 5
drivers/gpu/drm/i915/gvt/sched_policy.c

@@ -202,11 +202,6 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct vgpu_sched_data *vgpu_data;
 	struct intel_vgpu *vgpu = NULL;
-	static uint64_t timer_check;
-
-	if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
-		gvt_balance_timeslice(sched_data);
-
 	/* no active vgpu or has already had a target */
 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
 		goto out;
@@ -231,9 +226,19 @@ out:
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+	static uint64_t timer_check;

 	mutex_lock(&gvt->lock);
+
+	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+				(void *)&gvt->service_request)) {
+		if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+			gvt_balance_timeslice(sched_data);
+	}
+	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
+
 	tbs_sched_func(sched_data);
+
 	mutex_unlock(&gvt->lock);
 }

@@ -303,8 +308,20 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)

 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
+	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
+	int ring_id;
+
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
+
+	spin_lock_bh(&scheduler->mmio_context_lock);
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		if (scheduler->engine_owner[ring_id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+	}
+	spin_unlock_bh(&scheduler->mmio_context_lock);
 }

 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)

+ 32 - 7
drivers/gpu/drm/i915/gvt/scheduler.c

@@ -138,21 +138,42 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
 				shadow_ctx_notifier_block[req->engine->id]);
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	struct intel_vgpu_workload *workload =
-		scheduler->current_workload[req->engine->id];
+	enum intel_engine_id ring_id = req->engine->id;
+	struct intel_vgpu_workload *workload;
+
+	if (!is_gvt_request(req)) {
+		spin_lock_bh(&scheduler->mmio_context_lock);
+		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
+		    scheduler->engine_owner[ring_id]) {
+			/* Switch ring from vGPU to host. */
+			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+					      NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+		spin_unlock_bh(&scheduler->mmio_context_lock);
-	if (!is_gvt_request(req) || unlikely(!workload))
+		return NOTIFY_OK;
+	}
+
+	workload = scheduler->current_workload[ring_id];
+	if (unlikely(!workload))
 		return NOTIFY_OK;

 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
-		intel_gvt_load_render_mmio(workload->vgpu,
-					   workload->ring_id);
+		spin_lock_bh(&scheduler->mmio_context_lock);
+		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
+			/* Switch ring from host to vGPU or vGPU to vGPU. */
+			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
+					      workload->vgpu, ring_id);
+			scheduler->engine_owner[ring_id] = workload->vgpu;
+		} else
+			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
+				      ring_id, workload->vgpu->id);
+		spin_unlock_bh(&scheduler->mmio_context_lock);
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-		intel_gvt_restore_render_mmio(workload->vgpu,
-					      workload->ring_id);
 		/* If the status is -EINPROGRESS means this workload
 		 * doesn't meet any issue during dispatching so when
 		 * get the SCHEDULE_OUT set the status to be zero for
@@ -431,6 +452,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

 	atomic_dec(&vgpu->running_workload_num);
 	wake_up(&scheduler->workload_complete_wq);
+
+	if (gvt->scheduler.need_reschedule)
+		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+
 	mutex_unlock(&gvt->lock);
 }


+ 4 - 0
drivers/gpu/drm/i915/gvt/scheduler.h

@@ -42,6 +42,10 @@ struct intel_gvt_workload_scheduler {
 	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
 	bool need_reschedule;

+	spinlock_t mmio_context_lock;
+	/* can be null when owner is host */
+	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];
+
 	wait_queue_head_t workload_complete_wq;
 	struct task_struct *thread[I915_NUM_ENGINES];
 	wait_queue_head_t waitq[I915_NUM_ENGINES];

+ 129 - 49
drivers/gpu/drm/i915/gvt/trace.h

@@ -224,58 +224,138 @@ TRACE_EVENT(oos_sync,
 	TP_printk("%s", __entry->buf)
 );

-#define MAX_CMD_STR_LEN	256
 TRACE_EVENT(gvt_command,
-		TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
-
-		TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
-
-		TP_STRUCT__entry(
-			__field(u8, vm_id)
-			__field(u8, ring_id)
-			__field(int, i)
-			__array(char, tmp_buf, MAX_CMD_STR_LEN)
-			__array(char, cmd_str, MAX_CMD_STR_LEN)
-			),
-
-		TP_fast_assign(
-			__entry->vm_id = vm_id;
-			__entry->ring_id = ring_id;
-			__entry->cmd_str[0] = '\0';
-			snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
-			strcat(__entry->cmd_str, __entry->tmp_buf);
-			entry->i = 0;
-			while (cmd_len > 0) {
-				if (cmd_len >= 8) {
-					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
-						cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
-						cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
-					__entry->i += 8;
-					cmd_len -= 8;
-					strcat(__entry->cmd_str, __entry->tmp_buf);
-				} else if (cmd_len >= 4) {
-					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
-						cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
-					__entry->i += 4;
-					cmd_len -= 4;
-					strcat(__entry->cmd_str, __entry->tmp_buf);
-				} else if (cmd_len >= 2) {
-					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
-					__entry->i += 2;
-					cmd_len -= 2;
-					strcat(__entry->cmd_str, __entry->tmp_buf);
-				} else if (cmd_len == 1) {
-					snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
-					__entry->i += 1;
-					cmd_len -= 1;
-					strcat(__entry->cmd_str, __entry->tmp_buf);
-				}
-			}
-			strcat(__entry->cmd_str, "\n");
-		),
+	TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len,
+		 u32 buf_type),
+
+	TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type),
+
+	TP_STRUCT__entry(
+		__field(u8, vgpu_id)
+		__field(u8, ring_id)
+		__field(u32, ip_gma)
+		__field(u32, buf_type)
+		__field(u32, cmd_len)
+		__dynamic_array(u32, raw_cmd, cmd_len)
+	),
+
+	TP_fast_assign(
+		__entry->vgpu_id = vgpu_id;
+		__entry->ring_id = ring_id;
+		__entry->ip_gma = ip_gma;
+		__entry->buf_type = buf_type;
+		__entry->cmd_len = cmd_len;
+		memcpy(__get_dynamic_array(raw_cmd), cmd_va, cmd_len * sizeof(*cmd_va));
+	),
+
+
+	TP_printk("vgpu%d ring %d: buf_type %u, ip_gma %08x, raw cmd %s",
+		__entry->vgpu_id,
+		__entry->ring_id,
+		__entry->buf_type,
+		__entry->ip_gma,
+		__print_array(__get_dynamic_array(raw_cmd), __entry->cmd_len, 4))
+);
+
+#define GVT_TEMP_STR_LEN 10
+TRACE_EVENT(write_ir,
+	TP_PROTO(int id, char *reg_name, unsigned int reg, unsigned int new_val,
+		 unsigned int old_val, bool changed),
+
+	TP_ARGS(id, reg_name, reg, new_val, old_val, changed),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__array(char, buf, GVT_TEMP_STR_LEN)
+		__field(unsigned int, reg)
+		__field(unsigned int, new_val)
+		__field(unsigned int, old_val)
+		__field(bool, changed)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", reg_name);
+		__entry->reg = reg;
+		__entry->new_val = new_val;
+		__entry->old_val = old_val;
+		__entry->changed = changed;
+	),
+
+	TP_printk("VM%u write [%s] %x, new %08x, old %08x, changed %08x\n",
+		  __entry->id, __entry->buf, __entry->reg, __entry->new_val,
+		  __entry->old_val, __entry->changed)
+);
+
+TRACE_EVENT(propagate_event,
+	TP_PROTO(int id, const char *irq_name, int bit),
+
+	TP_ARGS(id, irq_name, bit),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__array(char, buf, GVT_TEMP_STR_LEN)
+		__field(int, bit)
+	),
-		TP_printk("%s", __entry->cmd_str)
+	TP_fast_assign(
+		__entry->id = id;
+		snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", irq_name);
+		__entry->bit = bit;
+	),
+
+	TP_printk("Set bit (%d) for (%s) for vgpu (%d)\n",
+		  __entry->bit, __entry->buf, __entry->id)
 );
+
+TRACE_EVENT(inject_msi,
+	TP_PROTO(int id, unsigned int address, unsigned int data),
+
+	TP_ARGS(id, address, data),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(unsigned int, address)
+		__field(unsigned int, data)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->address = address;
+		__entry->data = data;
+	),
+
+	TP_printk("vgpu%d:inject msi address %x data %x\n",
+		  __entry->id, __entry->address, __entry->data)
+);
+
+TRACE_EVENT(render_mmio,
+	TP_PROTO(int id, char *action, unsigned int reg,
+		 unsigned int old_val, unsigned int new_val),
+
+	TP_ARGS(id, action, reg, new_val, old_val),
+
+	TP_STRUCT__entry(
+		__field(int, id)
+		__array(char, buf, GVT_TEMP_STR_LEN)
+		__field(unsigned int, reg)
+		__field(unsigned int, old_val)
+		__field(unsigned int, new_val)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
+		__entry->reg = reg;
+		__entry->old_val = old_val;
+		__entry->new_val = new_val;
+	),
+
+	TP_printk("VM%u %s reg %x, old %08x new %08x\n",
+		  __entry->id, __entry->buf, __entry->reg,
+		  __entry->old_val, __entry->new_val)
+);
+
 #endif /* _GVT_TRACE_H_ */

 /* This part must be out of protection */

+ 7 - 2
drivers/gpu/drm/i915/gvt/vgpu.c

@@ -501,9 +501,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,

 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
+
 		intel_vgpu_reset_gtt(vgpu, dmlr);
-		intel_vgpu_reset_resource(vgpu);
-		intel_vgpu_reset_mmio(vgpu);
+
+		/* fence will not be reset during a virtual reset */
+		if (dmlr)
+			intel_vgpu_reset_resource(vgpu);
+
+		intel_vgpu_reset_mmio(vgpu, dmlr);
 		populate_pvinfo_page(vgpu);
 		intel_vgpu_reset_display(vgpu);


+ 41 - 27
drivers/gpu/drm/i915/i915_debugfs.c

@@ -1670,12 +1670,22 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		seq_printf(m, "FBC disabled: %s\n",
 			   dev_priv->fbc.no_fbc_reason);

-	if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) {
-		uint32_t mask = INTEL_GEN(dev_priv) >= 8 ?
-				BDW_FBC_COMPRESSION_MASK :
-				IVB_FBC_COMPRESSION_MASK;
-		seq_printf(m, "Compressing: %s\n",
-			   yesno(I915_READ(FBC_STATUS2) & mask));
+	if (intel_fbc_is_active(dev_priv)) {
+		u32 mask;
+
+		if (INTEL_GEN(dev_priv) >= 8)
+			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
+		else if (INTEL_GEN(dev_priv) >= 7)
+			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
+		else if (INTEL_GEN(dev_priv) >= 5)
+			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
+		else if (IS_G4X(dev_priv))
+			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+		else
+			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
+							FBC_STAT_COMPRESSED);
+
+		seq_printf(m, "Compressing: %s\n", yesno(mask));
 	}

 	mutex_unlock(&dev_priv->fbc.lock);
@@ -1684,7 +1694,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	return 0;
 }

-static int i915_fbc_fc_get(void *data, u64 *val)
+static int i915_fbc_false_color_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;

@@ -1696,7 +1706,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
 	return 0;
 }

-static int i915_fbc_fc_set(void *data, u64 val)
+static int i915_fbc_false_color_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
 	u32 reg;
@@ -1717,8 +1727,8 @@ static int i915_fbc_fc_set(void *data, u64 val)
 	return 0;
 }

-DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
-			i915_fbc_fc_get, i915_fbc_fc_set,
+DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
+			i915_fbc_false_color_get, i915_fbc_false_color_set,
 			"%llu\n");
 			"%llu\n");
 
 
 static int i915_ips_status(struct seq_file *m, void *unused)
 static int i915_ips_status(struct seq_file *m, void *unused)
@@ -1988,6 +1998,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			seq_putc(m, '\n');
 		}

+		seq_printf(m,
+			   "\tvma hashtable size=%u (actual %lu), count=%u\n",
+			   ctx->vma_lut.ht_size,
+			   BIT(ctx->vma_lut.ht_bits),
+			   ctx->vma_lut.ht_count);
+
 		seq_putc(m, '\n');
 	}

@@ -4289,26 +4305,27 @@ i915_drop_caches_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
 	struct drm_device *dev = &dev_priv->drm;
-	int ret;
+	int ret = 0;

 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
 	 * on ioctls on -EAGAIN. */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	if (val & DROP_ACTIVE) {
-		ret = i915_gem_wait_for_idle(dev_priv,
-					     I915_WAIT_INTERRUPTIBLE |
-					     I915_WAIT_LOCKED);
+	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
+		ret = mutex_lock_interruptible(&dev->struct_mutex);
 		if (ret)
 		if (ret)
-			goto unlock;
-	}
+			return ret;
 
 
-	if (val & DROP_RETIRE)
-		i915_gem_retire_requests(dev_priv);
+		if (val & DROP_ACTIVE)
+			ret = i915_gem_wait_for_idle(dev_priv,
+						     I915_WAIT_INTERRUPTIBLE |
+						     I915_WAIT_LOCKED);
+
+		if (val & DROP_RETIRE)
+			i915_gem_retire_requests(dev_priv);
+
+		mutex_unlock(&dev->struct_mutex);
+	}

 	lockdep_set_current_reclaim_state(GFP_KERNEL);
 	if (val & DROP_BOUND)
@@ -4321,9 +4338,6 @@ i915_drop_caches_set(void *data, u64 val)
 		i915_gem_shrink_all(dev_priv);
 	lockdep_clear_current_reclaim_state();

-unlock:
-	mutex_unlock(&dev->struct_mutex);
-
 	if (val & DROP_FREED) {
 		synchronize_rcu();
 		i915_gem_drain_freed_objects(dev_priv);
@@ -4861,7 +4875,7 @@ static const struct i915_debugfs_files {
 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
-	{"i915_fbc_false_color", &i915_fbc_fc_fops},
+	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
 	{"i915_dp_test_active", &i915_displayport_test_active_fops},

+ 41 - 5
drivers/gpu/drm/i915/i915_drv.c

@@ -139,6 +139,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
 	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		ret = PCH_SPT;
 		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+	} else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+		ret = PCH_CNP;
+		DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
 	}

 	return ret;
@@ -170,24 +173,29 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
 			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-			dev_priv->pch_id = id;
+			unsigned short id_ext = pch->device &
+				INTEL_PCH_DEVICE_ID_MASK_EXT;

 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id;
 				dev_priv->pch_type = PCH_IBX;
 				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
 				WARN_ON(!IS_GEN5(dev_priv));
 			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id;
 				dev_priv->pch_type = PCH_CPT;
 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
 				WARN_ON(!(IS_GEN6(dev_priv) ||
 					IS_IVYBRIDGE(dev_priv)));
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
+				dev_priv->pch_id = id;
 				dev_priv->pch_type = PCH_CPT;
 				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
 				WARN_ON(!(IS_GEN6(dev_priv) ||
 					IS_IVYBRIDGE(dev_priv)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id;
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 				WARN_ON(!IS_HASWELL(dev_priv) &&
@@ -195,6 +203,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 				WARN_ON(IS_HSW_ULT(dev_priv) ||
 					IS_BDW_ULT(dev_priv));
 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id;
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 				WARN_ON(!IS_HASWELL(dev_priv) &&
@@ -202,20 +211,35 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 				WARN_ON(!IS_HSW_ULT(dev_priv) &&
 					!IS_BDW_ULT(dev_priv));
 			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id;
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev_priv) &&
 					!IS_KABYLAKE(dev_priv));
-			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+			} else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id_ext;
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev_priv) &&
 					!IS_KABYLAKE(dev_priv));
 			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id;
 				dev_priv->pch_type = PCH_KBP;
 				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
 				WARN_ON(!IS_SKYLAKE(dev_priv) &&
 					!IS_KABYLAKE(dev_priv));
+			} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id;
+				dev_priv->pch_type = PCH_CNP;
+				DRM_DEBUG_KMS("Found CannonPoint PCH\n");
+				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
+					!IS_COFFEELAKE(dev_priv));
+			} else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_id = id_ext;
+				dev_priv->pch_type = PCH_CNP;
+				DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
+				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
+					!IS_COFFEELAKE(dev_priv));
 			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
 			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
 				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
 				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
 					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
 					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
 				    pch->subsystem_device ==
 					    PCI_SUBDEVICE_ID_QEMU)) {
 				dev_priv->pch_type =
 				dev_priv->pch_type =
 					intel_virt_detect_pch(dev_priv);
 			} else
 	case I915_PARAM_HAS_EXEC_ASYNC:
 	case I915_PARAM_HAS_EXEC_ASYNC:
 	case I915_PARAM_HAS_EXEC_FENCE:
 	case I915_PARAM_HAS_EXEC_CAPTURE:
 		/* For the time being all of these are always true;
 		/* For the time being all of these are always true;
 		 * if some supported hardware does not have one of these
 		 * features this value needs to be provided from
 		 */
 		 */
 		value = 1;
 		break;
+		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+		if (!value)
+			return -ENODEV;
+		break;
+	case I915_PARAM_SUBSLICE_MASK:
+		value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+		if (!value)
+			return -ENODEV;
+		break;
 	default:
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
 	intel_uc_fini_hw(dev_priv);
 	intel_uc_fini_hw(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_context_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	mutex_unlock(&dev_priv->drm.struct_mutex);

 	i915_gem_drain_freed_objects(dev_priv);
 	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
 	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));

 	intel_uc_sanitize_options(dev_priv);
+	intel_gvt_sanitize_options(dev_priv);
 }
 }

 /**
 

 	intel_guc_resume(dev_priv);

-		intel_init_pch_refclk(dev_priv);
-
 	if (IS_GEN9_LP(dev_priv)) {
 	if (IS_GEN9_LP(dev_priv)) {
 		bxt_disable_dc9(dev_priv);
 		bxt_display_core_init(dev_priv, true);
+ 76 - 30
drivers/gpu/drm/i915/i915_drv.h

@@ -37,7 +37,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/backlight.h>
+#include <linux/hash.h>
 #include <linux/intel-iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
 

 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_TIMESTAMP	1496041258
+#define DRIVER_DATE		"20170619"
+#define DRIVER_TIMESTAMP	1497857498
 

 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 	func(has_aliasing_ppgtt); \
 	func(has_aliasing_ppgtt); \
 	func(has_csr); \
 	func(has_ddi); \
 	func(has_dp_mst); \
 	func(has_dp_mst); \
 	func(has_fbc); \
 	func(has_fpga_dbg); \
 	INTEL_BROXTON,
 	INTEL_BROXTON,
 	INTEL_KABYLAKE,
 	INTEL_GEMINILAKE,
+	INTEL_CANNONLAKE,
 	INTEL_MAX_PLATFORMS
 	INTEL_MAX_PLATFORMS
 };

 	PCH_LPT,	/* Lynxpoint PCH */
 	PCH_LPT,	/* Lynxpoint PCH */
 	PCH_SPT,        /* Sunrisepoint PCH */
 	PCH_KBP,        /* Kabypoint PCH */
 	PCH_NOP,
 	PCH_NOP,
 };

 	SBI_MPHY,
 	SBI_MPHY,
 };

 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

 struct intel_fbdev;
 	/** LRU list of objects with fence regs on them. */
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;

+	 * Workqueue to fault in userptr pages, flushed by the execbuf
+	 * when required but otherwise left to userspace to try again
+	 * on EAGAIN.
+	 */
+	struct workqueue_struct *userptr_wq;
+
 	u64 unordered_timeline;

 	/* the indicator for dispatch video commands on two BSD rings */
@@ -2017,9 +2024,17 @@ struct i915_oa_ops {
 	void (*init_oa_buffer)(struct drm_i915_private *dev_priv);

 	/**
-	 * @enable_metric_set: Applies any MUX configuration to set up the
-	 * Boolean and Custom (B/C) counters that are part of the counter
-	 * reports being sampled. May apply system constraints such as
+	 * @select_metric_set: The auto generated code that checks whether a
+	 * requested OA config is applicable to the system and if so sets up
+	 * the mux, oa and flex eu register config pointers according to the
+	 * current dev_priv->perf.oa.metrics_set.
+	 */
+	int (*select_metric_set)(struct drm_i915_private *dev_priv);
+
+	/**
+	 * @enable_metric_set: Selects and applies any MUX configuration to set
+	 * up the Boolean and Custom (B/C) counters that are part of the
+	 * counter reports being sampled. May apply system constraints such as
 	 * disabling EU clock gating as required.
 	 * disabling EU clock gating as required.
 	 */
 	 */
 	int (*enable_metric_set)(struct drm_i915_private *dev_priv);
 	int (*enable_metric_set)(struct drm_i915_private *dev_priv);
 		    size_t *offset);
 		    size_t *offset);

 	/**
-	 *
-	 * This is either called via fops or the poll check hrtimer (atomic
-	 * ctx) without any locks taken.
+	 * @oa_hw_tail_read: read the OA tail pointer register
 	 *
 	 *
-	 * It's safe to read OA config state here unlocked, assuming that this
-	 * is only called while the stream is enabled, while the global OA
-	 * configuration can't be modified.
-	 *
-	 * Efficiency is more important than avoiding some false positives
-	 * here, which will be handled gracefully - likely resulting in an
-	 * %EAGAIN error for userspace.
+	 * In particular this enables us to share all the fiddly code for
+	 * handling the OA unit tail pointer race that affects multiple
+	 * generations.
 	 */
 	 */
-	bool (*oa_buffer_check)(struct drm_i915_private *dev_priv);
+	u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
 };
 };
 
 
 struct intel_cdclk_state {
 struct intel_cdclk_state {
@@ -2394,8 +2402,6 @@ struct drm_i915_private {
 		struct mutex lock;
 		struct list_head streams;

-		spinlock_t hook_lock;
-
 		struct {
 			struct i915_perf_stream *exclusive_stream;

@@ -2413,17 +2419,23 @@ struct drm_i915_private {

 			bool periodic;
 			int period_exponent;
+			int timestamp_frequency;

 			int metrics_set;

-			const struct i915_oa_reg *mux_regs;
-			int mux_regs_len;
+			const struct i915_oa_reg *mux_regs[6];
+			int mux_regs_lens[6];
+			int n_mux_configs;
+
 			const struct i915_oa_reg *b_counter_regs;
 			int b_counter_regs_len;
+			const struct i915_oa_reg *flex_regs;
+			int flex_regs_len;

 			struct {
 				struct i915_vma *vma;
 				u8 *vaddr;
+				u32 last_ctx_id;
 				int format;
 				int format_size;

@@ -2493,6 +2505,15 @@ struct drm_i915_private {
 			} oa_buffer;

 			u32 gen7_latched_oastatus1;
+			u32 ctx_oactxctrl_offset;
+			u32 ctx_flexeu0_offset;
+
+			/**
+			 * The RPT_ID/reason field for Gen8+ includes a bit
+			 * to determine if the CTX ID in the report is valid
+			 * but the specific bit differs between Gen 8 and 9
+			 */
+			u32 gen8_valid_ctx_bit;

 			struct i915_oa_ops ops;
 			const struct i915_oa_format *oa_formats;
@@ -2768,6 +2789,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_BROXTON(dev_priv)	((dev_priv)->info.platform == INTEL_BROXTON)
 #define IS_KABYLAKE(dev_priv)	((dev_priv)->info.platform == INTEL_KABYLAKE)
 #define IS_GEMINILAKE(dev_priv)	((dev_priv)->info.platform == INTEL_GEMINILAKE)
+#define IS_COFFEELAKE(dev_priv)	((dev_priv)->info.platform == INTEL_COFFEELAKE)
+#define IS_CANNONLAKE(dev_priv)	((dev_priv)->info.platform == INTEL_CANNONLAKE)
 #define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
 				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -2803,10 +2826,18 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
 				 INTEL_DEVID(dev_priv) == 0x5915 || \
 				 INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
 #define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
 				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
+#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
+#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
+				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)

 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)

@@ -2845,6 +2876,12 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_GLK_REVID(dev_priv, since, until) \
 	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))

+#define CNL_REVID_A0		0x0
+#define CNL_REVID_B0		0x1
+
+#define IS_CNL_REVID(p, since, until) \
+	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
+
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks
@@ -2859,6 +2896,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
 #define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
 #define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
+#define IS_GEN10(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(9)))

 #define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
 #define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
@@ -2959,6 +2997,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_POOLED_EU(dev_priv)	((dev_priv)->info.has_pooled_eu)

 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
+#define INTEL_PCH_DEVICE_ID_MASK_EXT		0xff80
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
 #define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
@@ -2967,11 +3006,16 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
 #define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA200
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */

 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
+#define HAS_PCH_CNP_LP(dev_priv) \
+	((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
@@ -2986,7 +3030,7 @@ intel_info(const struct drm_i915_private *dev_priv)

 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)

-#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
+#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)

 /* DPF == dynamic parity feature */
 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
@@ -2996,8 +3040,6 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define GT_FREQUENCY_MULTIPLIER 50
 #define GEN9_FREQ_SCALER 3

-#define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio)
-
 #include "i915_trace.h"

 static inline bool intel_vtd_active(void)
@@ -3194,7 +3236,8 @@ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
-void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
+int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
+void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file);
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -3534,6 +3577,9 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,

 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file);
+void i915_oa_init_reg_state(struct intel_engine_cs *engine,
+			    struct i915_gem_context *ctx,
+			    uint32_t *reg_state);

 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@@ -3544,7 +3590,7 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
 					 struct drm_mm_node *node,
 					 unsigned int flags);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_vm(struct i915_address_space *vm);

 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)

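Editor's note: the GT-segment and PCH checks added above all pattern-match fixed bit fields of the PCI device ID. A minimal standalone sketch of that decoding (the GT_SEGMENT helper and the example device ID are illustrative, not part of the patch):

    #include <stdio.h>

    /* Bits 7:4 of the PCI device ID encode the GT segment, which is what
     * the (INTEL_DEVID(dev_priv) & 0x00F0) comparisons above are testing.
     */
    #define GT_SEGMENT(devid) (((devid) & 0x00F0) >> 4)

    int main(void)
    {
        unsigned int devid = 0x5926; /* hypothetical SKL-class id */

        switch (GT_SEGMENT(devid)) {
        case 0x1: printf("GT2 part\n"); break;
        case 0x2: printf("GT3 part\n"); break;
        case 0x3: printf("GT4 part\n"); break;
        case 0xA: printf("ULT part (CFL convention)\n"); break;
        default:  printf("other segment\n"); break;
        }
        return 0;
    }
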
+ 102 - 62
drivers/gpu/drm/i915/i915_gem.c

@@ -49,10 +49,10 @@ static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
-	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+	if (obj->cache_dirty)
 		return false;

-	if (!i915_gem_object_is_coherent(obj))
+	if (!obj->cache_coherent)
 		return true;

 	return obj->pin_display;
@@ -143,9 +143,9 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_get_aperture *args = data;
 	struct i915_vma *vma;
-	size_t pinned;
+	u64 pinned;

-	pinned = 0;
+	pinned = ggtt->base.reserved;
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
 		if (i915_vma_is_pinned(vma))
@@ -233,6 +233,14 @@ err_phys:
 	return st;
 }

+static void __start_cpu_write(struct drm_i915_gem_object *obj)
+{
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	if (cpu_write_needs_clflush(obj))
+		obj->cache_dirty = true;
+}
+
 static void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 				struct sg_table *pages,
@@ -245,11 +253,10 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,

 	if (needs_clflush &&
 	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
-	    !i915_gem_object_is_coherent(obj))
+	    !obj->cache_coherent)
 		drm_clflush_sg(pages);

-	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	__start_cpu_write(obj);
 }

 static void
@@ -684,6 +691,12 @@ i915_gem_dumb_create(struct drm_file *file,
 			       args->size, &args->handle);
 }

+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+	return !(obj->cache_level == I915_CACHE_NONE ||
+		 obj->cache_level == I915_CACHE_WT);
+}
+
 /**
  * Creates a new mm object and returns a handle to it.
  * @dev: drm device pointer
@@ -753,6 +766,11 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 	case I915_GEM_DOMAIN_CPU:
 		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 		break;
+
+	case I915_GEM_DOMAIN_RENDER:
+		if (gpu_write_needs_clflush(obj))
+			obj->cache_dirty = true;
+		break;
 	}

 	obj->base.write_domain = 0;
@@ -838,8 +856,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;

-	if (i915_gem_object_is_coherent(obj) ||
-	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+	if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, false);
 		if (ret)
 			goto err_unpin;
@@ -854,7 +871,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	 * optimizes for the case when the gpu will dirty the data
 	 * anyway again before the next pread happens.
 	 */
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+	if (!obj->cache_dirty &&
+	    !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
 		*needs_clflush = CLFLUSH_BEFORE;

 out:
@@ -890,8 +908,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;

-	if (i915_gem_object_is_coherent(obj) ||
-	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+	if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
 		if (ret)
 			goto err_unpin;
@@ -906,14 +923,16 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	 * This optimizes for the case when the gpu will use the data
 	 * right away and we therefore have to clflush anyway.
 	 */
-	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+	if (!obj->cache_dirty) {
 		*needs_clflush |= CLFLUSH_AFTER;

-	/* Same trick applies to invalidate partially written cachelines read
-	 * before writing.
-	 */
-	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
-		*needs_clflush |= CLFLUSH_BEFORE;
+		/*
+		 * Same trick applies to invalidate partially written
+		 * cachelines read before writing.
+		 */
+		if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+			*needs_clflush |= CLFLUSH_BEFORE;
+	}

 out:
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
@@ -2337,8 +2356,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	unsigned int max_segment;
+	gfp_t noreclaim;
 	int ret;
-	gfp_t gfp;

 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2367,22 +2386,30 @@ rebuild_st:
 	 * Fail silently without starting the shrinker
 	 */
 	mapping = obj->base.filp->f_mapping;
-	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
-	gfp |= __GFP_NORETRY | __GFP_NOWARN;
+	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
+	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
 	sg = st->sgl;
 	st->nents = 0;
 	for (i = 0; i < page_count; i++) {
-		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-		if (unlikely(IS_ERR(page))) {
-			i915_gem_shrink(dev_priv,
-					page_count,
-					I915_SHRINK_BOUND |
-					I915_SHRINK_UNBOUND |
-					I915_SHRINK_PURGEABLE);
+		const unsigned int shrink[] = {
+			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
+			0,
+		}, *s = shrink;
+		gfp_t gfp = noreclaim;
+
+		do {
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-		}
-		if (unlikely(IS_ERR(page))) {
-			gfp_t reclaim;
+			if (likely(!IS_ERR(page)))
+				break;
+
+			if (!*s) {
+				ret = PTR_ERR(page);
+				goto err_sg;
+			}
+
+			i915_gem_shrink(dev_priv, 2 * page_count, *s++);
+			cond_resched();

 			/* We've tried hard to allocate the memory by reaping
 			 * our own buffer, now let the real VM do its job and
@@ -2392,15 +2419,26 @@ rebuild_st:
 			 * defer the oom here by reporting the ENOMEM back
 			 * to userspace.
 			 */
-			reclaim = mapping_gfp_mask(mapping);
-			reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
-
-			page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto err_sg;
+			if (!*s) {
+				/* reclaim and warn, but no oom */
+				gfp = mapping_gfp_mask(mapping);
+
+				/* Our bo are always dirty and so we require
+				 * kswapd to reclaim our pages (direct reclaim
+				 * does not effectively begin pageout of our
+				 * buffers on its own). However, direct reclaim
+				 * only waits for kswapd when under allocation
+				 * congestion. So as a result __GFP_RECLAIM is
+				 * unreliable and fails to actually reclaim our
+				 * dirty pages -- unless you try over and over
+				 * again with !__GFP_NORETRY. However, we still
+				 * want to fail this allocation rather than
+				 * trigger the out-of-memory killer and for
+				 * this we want the future __GFP_MAYFAIL.
+				 */
 			}
-		}
+		} while (1);
+
 		if (!i ||
 		    sg->length >= max_segment ||
 		    page_to_pfn(page) != last_pfn + 1) {
@@ -3223,6 +3261,10 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 		if (vma->vm->file == fpriv)
 			i915_vma_close(vma);

+	vma = obj->vma_hashed;
+	if (vma && vma->ctx->file_priv == fpriv)
+		i915_vma_unlink_ctx(vma);
+
 	if (i915_gem_object_is_active(obj) &&
 	    !i915_gem_object_has_active_reference(obj)) {
 		i915_gem_object_set_active_reference(obj);
@@ -3376,10 +3418,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)

 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 {
-	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty)
-		return;
-
-	i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
+	/*
+	 * We manually flush the CPU domain so that we can override and
+	 * force the flush for the display, and perform it asynchronously.
+	 */
+	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+	if (obj->cache_dirty)
+		i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
 	obj->base.write_domain = 0;
 }

@@ -3638,13 +3683,11 @@ restart:
 		}
 	}

-	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
-	    i915_gem_object_is_coherent(obj))
-		obj->cache_dirty = true;
-
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		vma->node.color = cache_level;
 	obj->cache_level = cache_level;
+	obj->cache_coherent = i915_gem_object_is_coherent(obj);
+	obj->cache_dirty = true; /* Always invalidate stale cachelines */

 	return 0;
 }
@@ -3866,9 +3909,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;

-	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
-		return 0;
-
 	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

 	/* Flush the CPU cache if it's still invalid. */
@@ -3880,15 +3920,13 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);

 	/* If we're writing through the CPU, then the GPU read domains will
 	 * need to be invalidated at next use.
 	 */
-	if (write) {
-		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
+	if (write)
+		__start_cpu_write(obj);

 	return 0;
 }
@@ -4220,7 +4258,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,

 	INIT_LIST_HEAD(&obj->global_link);
 	INIT_LIST_HEAD(&obj->userfault_link);
-	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);

@@ -4285,6 +4322,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)

 	mapping = obj->base.filp->f_mapping;
 	mapping_set_gfp_mask(mapping, mask);
+	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

 	i915_gem_object_init(obj, &i915_gem_object_ops);

@@ -4308,6 +4346,9 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 	} else
 		obj->cache_level = I915_CACHE_NONE;

+	obj->cache_coherent = i915_gem_object_is_coherent(obj);
+	obj->cache_dirty = !obj->cache_coherent;
+
 	trace_i915_gem_object_create(obj);

 	return obj;
@@ -4356,7 +4397,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(i915_gem_object_is_active(obj));
 		list_for_each_entry_safe(vma, vn,
 					 &obj->vma_list, obj_link) {
-			GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 			GEM_BUG_ON(i915_vma_is_active(vma));
 			vma->flags &= ~I915_VMA_PIN_MASK;
 			i915_vma_close(vma);
@@ -4763,7 +4803,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

-	i915_gem_init_userptr(dev_priv);
+	ret = i915_gem_init_userptr(dev_priv);
+	if (ret)
+		goto out_unlock;

 	ret = i915_gem_init_ggtt(dev_priv);
 	if (ret)
@@ -4974,10 +5016,8 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)

 	mutex_lock(&dev_priv->drm.struct_mutex);
 	for (p = phases; *p; p++) {
-		list_for_each_entry(obj, *p, global_link) {
-			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
-			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-		}
+		list_for_each_entry(obj, *p, global_link)
+			__start_cpu_write(obj);
 	}
 	mutex_unlock(&dev_priv->drm.struct_mutex);


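Editor's note: the rewritten page-allocation loop in i915_gem_object_get_pages_gtt() escalates in stages — a cheap no-reclaim attempt first, a pass of the driver's own shrinker next, and full (but still non-OOM) reclaim only as a last resort. A standalone sketch of that escalation pattern, with stand-in helpers rather than the kernel's gfp API:

    #include <stdio.h>
    #include <stdlib.h>

    enum gfp { GFP_CHEAP, GFP_FULL }; /* stand-ins, not kernel gfp_t flags */

    static int attempts; /* simulate two cheap failures before success */

    static void *try_alloc(enum gfp gfp)
    {
        if (attempts++ < 2 && gfp == GFP_CHEAP)
            return NULL; /* pretend the no-reclaim path is exhausted */
        return malloc(4096);
    }

    static void shrink_caches(unsigned int level)
    {
        printf("shrinking own caches (level %u)\n", level);
    }

    static void *alloc_with_fallback(void)
    {
        const unsigned int stages[] = { 1, 0 }; /* shrink passes, 0-terminated */
        const unsigned int *s = stages;
        enum gfp gfp = GFP_CHEAP;
        void *page;

        do {
            page = try_alloc(gfp);
            if (page)
                return page;
            if (!*s)
                return NULL; /* no fallback left: fail, don't OOM */
            shrink_caches(*s++);
            if (!*s)
                gfp = GFP_FULL; /* final pass: allow real reclaim */
        } while (1);
    }

    int main(void)
    {
        printf("allocation %s\n", alloc_with_fallback() ? "succeeded" : "failed");
        return 0;
    }
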
+ 17 - 2
drivers/gpu/drm/i915/i915_gem_batch_pool.c

@@ -114,12 +114,27 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	list_for_each_entry(obj, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
 		if (i915_gem_object_is_active(obj)) {
-			if (!reservation_object_test_signaled_rcu(obj->resv,
-								  true))
+			struct reservation_object *resv = obj->resv;
+
+			if (!reservation_object_test_signaled_rcu(resv, true))
 				break;

 			i915_gem_retire_requests(pool->engine->i915);
 			GEM_BUG_ON(i915_gem_object_is_active(obj));
+
+			/*
+			 * The object is now idle, clear the array of shared
+			 * fences before we add a new request. Although we
+			 * remain on the same engine, we may be on a different
+			 * timeline and so may continually grow the array,
+			 * trapping a reference to all the old fences, rather
+			 * than replace the existing fence.
+			 */
+			if (rcu_access_pointer(resv->fence)) {
+				reservation_object_lock(resv, NULL);
+				reservation_object_add_excl_fence(resv, NULL);
+				reservation_object_unlock(resv);
+			}
 		}

 		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,

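Editor's note: the lookup above leans on the pool list being strictly LRU ordered — hitting a busy batch ends the scan, since everything after it is newer and necessarily busy too. A hedged sketch of that early-out search (the types and size criterion are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct batch {
        const char *name;
        unsigned int size;
        bool busy;          /* still referenced by the GPU */
        struct batch *next; /* kept oldest-first (LRU order) */
    };

    /* Find the oldest idle batch of at least min_size bytes. Because the
     * list is strictly LRU ordered, the first busy batch ends the search:
     * every later entry was submitted after it and must be busy as well.
     */
    static struct batch *find_idle(struct batch *head, unsigned int min_size)
    {
        struct batch *b;

        for (b = head; b; b = b->next) {
            if (b->busy)
                break;
            if (b->size >= min_size)
                return b;
        }
        return NULL;
    }

    int main(void)
    {
        struct batch newest = { "newest", 4096, true,  NULL };
        struct batch oldest = { "oldest", 8192, false, &newest };
        struct batch *b = find_idle(&oldest, 4096);

        printf("reuse: %s\n", b ? b->name : "none");
        return 0;
    }
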
+ 8 - 9
drivers/gpu/drm/i915/i915_gem_clflush.c

@@ -71,8 +71,6 @@ static const struct dma_fence_ops i915_clflush_ops = {
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
 	drm_clflush_sg(obj->mm.pages);
-	obj->cache_dirty = false;
-
 	intel_fb_obj_flush(obj, ORIGIN_CPU);
 }

@@ -81,9 +79,6 @@ static void i915_clflush_work(struct work_struct *work)
 	struct clflush *clflush = container_of(work, typeof(*clflush), work);
 	struct drm_i915_gem_object *obj = clflush->obj;

-	if (!obj->cache_dirty)
-		goto out;
-
 	if (i915_gem_object_pin_pages(obj)) {
 		DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
 		goto out;
@@ -131,10 +126,10 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 * anything not backed by physical memory we consider to be always
 	 * coherent and not need clflushing.
 	 */
-	if (!i915_gem_object_has_struct_page(obj))
+	if (!i915_gem_object_has_struct_page(obj)) {
+		obj->cache_dirty = false;
 		return;
-
-	obj->cache_dirty = true;
+	}

 	/* If the GPU is snooping the contents of the CPU cache,
 	 * we do not need to manually clear the CPU cache lines.  However,
@@ -144,7 +139,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 * snooping behaviour occurs naturally as the result of our domain
 	 * tracking.
 	 */
-	if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj))
+	if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
 		return;

 	trace_i915_gem_object_clflush(obj);
@@ -153,6 +148,8 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	if (!(flags & I915_CLFLUSH_SYNC))
 		clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
 	if (clflush) {
+		GEM_BUG_ON(!obj->cache_dirty);
+
 		dma_fence_init(&clflush->dma,
 			       &i915_clflush_ops,
 			       &clflush_lock,
@@ -180,4 +177,6 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	} else {
 		GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
 	}
+
+	obj->cache_dirty = false;
 }

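Editor's note: after this patch, i915_gem_clflush_object() owns the cache_dirty bookkeeping. A condensed sketch of the resulting decision ladder (simplified flags, not the driver's types):

    #include <stdbool.h>
    #include <stdio.h>

    struct obj_state {
        bool has_struct_page; /* backed by flushable system pages */
        bool cache_coherent;  /* GPU snoops the CPU cache */
        bool cache_dirty;     /* CPU cache may hold unflushed writes */
    };

    /* Returns true if cache lines were (notionally) flushed. Objects with
     * no struct pages have nothing to flush; coherent objects are skipped
     * unless the caller forces the flush; cache_dirty is cleared whenever
     * the object ends up clean.
     */
    static bool clflush_object(struct obj_state *obj, bool force)
    {
        if (!obj->has_struct_page) {
            obj->cache_dirty = false;
            return false;
        }

        if (!force && obj->cache_coherent)
            return false; /* snooping already keeps caches coherent */

        /* ... the real code issues drm_clflush_sg() here ... */
        obj->cache_dirty = false;
        return true;
    }

    int main(void)
    {
        struct obj_state obj = { true, false, true };

        printf("flushed: %d, dirty: %d\n",
               clflush_object(&obj, false), obj.cache_dirty);
        return 0;
    }
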
+ 82 - 4
drivers/gpu/drm/i915/i915_gem_context.c

@@ -85,6 +85,7 @@
  *
  */

+#include <linux/log2.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -92,6 +93,71 @@

 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1

+/* Initial size (as log2) to preallocate the handle->object hashtable */
+#define VMA_HT_BITS 2u /* 4 x 2 pointers, 64 bytes minimum */
+
+static void resize_vma_ht(struct work_struct *work)
+{
+	struct i915_gem_context_vma_lut *lut =
+		container_of(work, typeof(*lut), resize);
+	unsigned int bits, new_bits, size, i;
+	struct hlist_head *new_ht;
+
+	GEM_BUG_ON(!(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS));
+
+	bits = 1 + ilog2(4*lut->ht_count/3 + 1);
+	new_bits = min_t(unsigned int,
+			 max(bits, VMA_HT_BITS),
+			 sizeof(unsigned int) * BITS_PER_BYTE - 1);
+	if (new_bits == lut->ht_bits)
+		goto out;
+
+	new_ht = kzalloc(sizeof(*new_ht)<<new_bits, GFP_KERNEL | __GFP_NOWARN);
+	if (!new_ht)
+		new_ht = vzalloc(sizeof(*new_ht)<<new_bits);
+	if (!new_ht)
+		/* Pretend resize succeeded and stop calling us for a bit! */
+		goto out;
+
+	size = BIT(lut->ht_bits);
+	for (i = 0; i < size; i++) {
+		struct i915_vma *vma;
+		struct hlist_node *tmp;
+
+		hlist_for_each_entry_safe(vma, tmp, &lut->ht[i], ctx_node)
+			hlist_add_head(&vma->ctx_node,
+				       &new_ht[hash_32(vma->ctx_handle,
+						       new_bits)]);
+	}
+	kvfree(lut->ht);
+	lut->ht = new_ht;
+	lut->ht_bits = new_bits;
+out:
+	smp_store_release(&lut->ht_size, BIT(bits));
+	GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
+}
+
+static void vma_lut_free(struct i915_gem_context *ctx)
+{
+	struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
+	unsigned int i, size;
+
+	if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS)
+		cancel_work_sync(&lut->resize);
+
+	size = BIT(lut->ht_bits);
+	for (i = 0; i < size; i++) {
+		struct i915_vma *vma;
+
+		hlist_for_each_entry(vma, &lut->ht[i], ctx_node) {
+			vma->obj->vma_hashed = NULL;
+			vma->ctx = NULL;
+			i915_vma_put(vma);
+		}
+	}
+	kvfree(lut->ht);
+}
+
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -101,6 +167,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	trace_i915_context_free(ctx);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

+	vma_lut_free(ctx);
 	i915_ppgtt_put(ctx->ppgtt);

 	for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -118,6 +185,7 @@ void i915_gem_context_free(struct kref *ctx_ref)

 	kfree(ctx->name);
 	put_pid(ctx->pid);
+
 	list_del(&ctx->link);

 	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -201,13 +269,24 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->i915 = dev_priv;
 	ctx->priority = I915_PRIORITY_NORMAL;

+	ctx->vma_lut.ht_bits = VMA_HT_BITS;
+	ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
+	BUILD_BUG_ON(BIT(VMA_HT_BITS) == I915_CTX_RESIZE_IN_PROGRESS);
+	ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
+				  sizeof(*ctx->vma_lut.ht),
+				  GFP_KERNEL);
+	if (!ctx->vma_lut.ht)
+		goto err_out;
+
+	INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);
+
 	/* Default context will never have a file_priv */
 	ret = DEFAULT_CONTEXT_HANDLE;
 	if (file_priv) {
 		ret = idr_alloc(&file_priv->context_idr, ctx,
 				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
 		if (ret < 0)
-			goto err_out;
+			goto err_lut;
 	}
 	ctx->user_handle = ret;

@@ -248,6 +327,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 err_pid:
 	put_pid(ctx->pid);
 	idr_remove(&file_priv->context_idr, ctx->user_handle);
+err_lut:
+	kvfree(ctx->vma_lut.ht);
 err_out:
 	context_close(ctx);
 	return ERR_PTR(ret);
@@ -1034,9 +1115,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 	if (args->flags || args->pad)
 		return -EINVAL;

-	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;

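Editor's note: resize_vma_ht() above picks the new bucket count from the load factor — bits = 1 + ilog2(4*count/3 + 1) guarantees the resized table is less than three-quarters full. A standalone sketch of the same computation, omitting the min/max clamping the driver applies:

    #include <stdio.h>

    static unsigned int ilog2_u(unsigned int v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    /* e.g. count = 6 gives 4*6/3 + 1 = 9, ilog2(9) = 3, so bits = 4
     * and the table grows to 16 buckets (load 6/16 = 0.375).
     */
    static unsigned int ht_bits_for(unsigned int count)
    {
        return 1 + ilog2_u(4 * count / 3 + 1);
    }

    int main(void)
    {
        unsigned int counts[] = { 1, 3, 6, 12, 100 };

        for (unsigned int i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
            printf("%u entries -> %u buckets\n",
                   counts[i], 1u << ht_bits_for(counts[i]));
        return 0;
    }
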
+ 26 - 0
drivers/gpu/drm/i915/i915_gem_context.h

@@ -143,6 +143,32 @@ struct i915_gem_context {
 	/** ggtt_offset_bias: placement restriction for context objects */
 	u32 ggtt_offset_bias;

+	struct i915_gem_context_vma_lut {
+		/** ht_size: last request size to allocate the hashtable for. */
+		unsigned int ht_size;
+#define I915_CTX_RESIZE_IN_PROGRESS BIT(0)
+		/** ht_bits: real log2(size) of hashtable. */
+		unsigned int ht_bits;
+		/** ht_count: current number of entries inside the hashtable */
+		unsigned int ht_count;
+
+		/** ht: the array of buckets comprising the simple hashtable */
+		struct hlist_head *ht;
+
+		/**
+		 * resize: After an execbuf completes, we check the load factor
+		 * of the hashtable. If the hashtable is too full, or too empty,
+		 * we schedule a task to resize the hashtable. During the
+		 * resize, the entries are moved between different buckets and
+		 * so we cannot simultaneously read the hashtable as it is
+		 * being resized (unlike rhashtable). Therefore we treat the
+		 * active work as a strong barrier, pausing a subsequent
+		 * execbuf to wait for the resize worker to complete, if
+		 * required.
+		 */
+		struct work_struct resize;
+	} vma_lut;
+
 	/** engine: per-engine logical HW state */
 	struct intel_context {
 		struct i915_vma *state;

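Editor's note: the resize comment above treats an in-flight worker as a barrier, published via the smp_store_release() in resize_vma_ht(); the execbuf-side reader is not part of this hunk. A userspace analogue of that publish/observe pairing, sketched with C11 atomics and assuming nothing beyond what the comment describes:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RESIZE_IN_PROGRESS 1u

    struct lut {
        _Atomic unsigned int ht_size; /* bucket count, low bit = resizing */
    };

    /* writer: after moving entries, publish the final size with release
     * semantics so readers that observe it also observe the new buckets
     */
    static void finish_resize(struct lut *lut, unsigned int new_size)
    {
        atomic_store_explicit(&lut->ht_size, new_size, memory_order_release);
    }

    /* reader: a concurrent resize means the buckets cannot be walked yet */
    static bool lut_ready(const struct lut *lut)
    {
        unsigned int size =
            atomic_load_explicit(&lut->ht_size, memory_order_acquire);

        return !(size & RESIZE_IN_PROGRESS);
    }

    int main(void)
    {
        struct lut lut = { 8 | RESIZE_IN_PROGRESS };

        finish_resize(&lut, 16);
        return lut_ready(&lut) ? 0 : 1;
    }
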
+ 65 - 54
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -50,6 +50,29 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
 	return true;
 }

+static int ggtt_flush(struct drm_i915_private *i915)
+{
+	int err;
+
+	/* Not everything in the GGTT is tracked via vma (otherwise we
+	 * could evict as required with minimal stalling) so we are forced
+	 * to idle the GPU and explicitly retire outstanding requests in
+	 * the hopes that we can then remove contexts and the like only
+	 * bound by their active reference.
+	 */
+	err = i915_gem_switch_to_kernel_context(i915);
+	if (err)
+		return err;
+
+	err = i915_gem_wait_for_idle(i915,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
+	if (err)
+		return err;
+
+	return 0;
+}
+
 static bool
 mark_free(struct drm_mm_scan *scan,
 	  struct i915_vma *vma,
@@ -59,13 +82,10 @@ mark_free(struct drm_mm_scan *scan,
 	if (i915_vma_is_pinned(vma))
 		return false;

-	if (WARN_ON(!list_empty(&vma->exec_list)))
-		return false;
-
 	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
 		return false;

-	list_add(&vma->exec_list, unwind);
+	list_add(&vma->evict_link, unwind);
 	return drm_mm_scan_add_block(scan, &vma->node);
 }

@@ -157,11 +177,9 @@ search_again:
 	} while (*++phase);

 	/* Nothing found, clean up and bail out! */
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		ret = drm_mm_scan_remove_block(&scan, &vma->node);
 		BUG_ON(ret);
-
-		INIT_LIST_HEAD(&vma->exec_list);
 	}

 	/* Can we unpin some objects such as idle hw contents,
@@ -180,19 +198,7 @@ search_again:
 		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 	}

-	/* Not everything in the GGTT is tracked via vma (otherwise we
-	 * could evict as required with minimal stalling) so we are forced
-	 * to idle the GPU and explicitly retire outstanding requests in
-	 * the hopes that we can then remove contexts and the like only
-	 * bound by their active reference.
-	 */
-	ret = i915_gem_switch_to_kernel_context(dev_priv);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_wait_for_idle(dev_priv,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED);
+	ret = ggtt_flush(dev_priv);
 	if (ret)
 		return ret;

@@ -205,21 +211,16 @@ found:
 	 * calling unbind (which may remove the active reference
 	 * of any of our objects, thus corrupting the list).
 	 */
-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		if (drm_mm_scan_remove_block(&scan, &vma->node))
 			__i915_vma_pin(vma);
 		else
-			list_del_init(&vma->exec_list);
+			list_del(&vma->evict_link);
 	}

 	/* Unbinding will emit any required flushes */
 	ret = 0;
-	while (!list_empty(&eviction_list)) {
-		vma = list_first_entry(&eviction_list,
-				       struct i915_vma,
-				       exec_list);
-
-		list_del_init(&vma->exec_list);
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
@@ -315,7 +316,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		}

 		/* Overlap of objects in the same batch? */
-		if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
+		if (i915_vma_is_pinned(vma)) {
 			ret = -ENOSPC;
 			if (vma->exec_entry &&
 			    vma->exec_entry->flags & EXEC_OBJECT_PINNED)
@@ -332,11 +333,10 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		 * reference) another in our eviction list.
 		 */
 		__i915_vma_pin(vma);
-		list_add(&vma->exec_list, &eviction_list);
+		list_add(&vma->evict_link, &eviction_list);
 	}

-	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
-		list_del_init(&vma->exec_list);
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
@@ -348,10 +348,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 /**
  * i915_gem_evict_vm - Evict all idle vmas from a vm
  * @vm: Address space to cleanse
- * @do_idle: Boolean directing whether to idle first.
  *
- * This function evicts all idles vmas from a vm. If all unpinned vmas should be
- * evicted the @do_idle needs to be set to true.
+ * This function evicts all vmas from a vm.
  *
  * This is used by the execbuf code as a last-ditch effort to defragment the
  * address space.
@@ -359,37 +357,50 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+int i915_gem_evict_vm(struct i915_address_space *vm)
 {
+	struct list_head *phases[] = {
+		&vm->inactive_list,
+		&vm->active_list,
+		NULL
+	}, **phase;
+	struct list_head eviction_list;
 	struct i915_vma *vma, *next;
 	int ret;

 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
 	trace_i915_gem_evict_vm(vm);

-	if (do_idle) {
-		struct drm_i915_private *dev_priv = vm->i915;
-
-		if (i915_is_ggtt(vm)) {
-			ret = i915_gem_switch_to_kernel_context(dev_priv);
-			if (ret)
-				return ret;
-		}
-
-		ret = i915_gem_wait_for_idle(dev_priv,
-					     I915_WAIT_INTERRUPTIBLE |
-					     I915_WAIT_LOCKED);
+	/* Switch back to the default context in order to unpin
+	 * the existing context objects. However, such objects only
+	 * pin themselves inside the global GTT and performing the
+	 * switch otherwise is ineffective.
+	 */
+	if (i915_is_ggtt(vm)) {
+		ret = ggtt_flush(vm->i915);
 		if (ret)
 			return ret;
-
-		WARN_ON(!list_empty(&vm->active_list));
 	}

-	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
-		if (!i915_vma_is_pinned(vma))
-			WARN_ON(i915_vma_unbind(vma));
+	INIT_LIST_HEAD(&eviction_list);
+	phase = phases;
+	do {
+		list_for_each_entry(vma, *phase, vm_link) {
+			if (i915_vma_is_pinned(vma))
+				continue;
-	return 0;
+			__i915_vma_pin(vma);
+			list_add(&vma->evict_link, &eviction_list);
+		}
+	} while (*++phase);
+
+	ret = 0;
+	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
+		__i915_vma_unpin(vma);
+		if (ret == 0)
+			ret = i915_vma_unbind(vma);
+	}
+	return ret;
 }

 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

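Editor's note: i915_gem_evict_vm() now gathers and pins every candidate before doing any unbinding, so the destructive second pass cannot corrupt the vm lists it walked. A standalone sketch of that two-phase pattern (illustrative types, with printf standing in for i915_vma_unbind()):

    #include <stdbool.h>
    #include <stdio.h>

    struct vma {
        const char *name;
        bool pinned;
        struct vma *next;       /* vm list linkage */
        struct vma *evict_next; /* private eviction list linkage */
    };

    static int evict_all(struct vma *vm_list)
    {
        struct vma *evict_list = NULL;
        struct vma *v, *n;
        int ret = 0;

        /* phase 1: collect candidates; pinning keeps them (and the
         * list we are walking) stable under us
         */
        for (v = vm_list; v; v = v->next) {
            if (v->pinned)
                continue;
            v->pinned = true;
            v->evict_next = evict_list;
            evict_list = v;
        }

        /* phase 2: unpin and unbind from the private list only */
        for (v = evict_list; v; v = n) {
            n = v->evict_next;
            v->pinned = false;
            printf("unbinding %s\n", v->name);
        }
        return ret;
    }

    int main(void)
    {
        struct vma v2 = { "ring",  true,  NULL, NULL };
        struct vma v1 = { "batch", false, &v2,  NULL };

        return evict_all(&v1);
    }
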
+ 1631 - 1123
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -40,143 +40,726 @@
 #include "intel_drv.h"
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include "intel_frontbuffer.h"
 
 
-#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */
+enum {
+	FORCE_CPU_RELOC = 1,
+	FORCE_GTT_RELOC,
+	FORCE_GPU_RELOC,
+#define DBG_FORCE_RELOC 0 /* choose one of the above! */
+};
+
+#define __EXEC_OBJECT_HAS_REF		BIT(31)
+#define __EXEC_OBJECT_HAS_PIN		BIT(30)
+#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
+#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
+#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
+#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
-#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
-#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
-#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
-#define  __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
+#define __EXEC_HAS_RELOC	BIT(31)
+#define __EXEC_VALIDATED	BIT(30)
+#define UPDATE			PIN_OFFSET_FIXED
 

 #define BATCH_OFFSET_BIAS (256*1024)

-	struct drm_device               *dev;
-	struct drm_file                 *file;
-	struct i915_vma			*batch;
-	u32				dispatch_flags;
-	u32				args_batch_start_offset;
-	struct intel_engine_cs          *engine;
-	struct i915_gem_context         *ctx;
-	struct drm_i915_gem_request     *request;
-};
+#define __I915_EXEC_ILLEGAL_FLAGS \
+	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
+
+/**
+ * DOC: User command execution
+ *
+ * Userspace submits commands to be executed on the GPU as an instruction
+ * stream within a GEM object we call a batchbuffer. These instructions may
+ * refer to other GEM objects containing auxiliary state such as kernels,
+ * samplers, render targets and even secondary batchbuffers. Userspace does
+ * not know where in the GPU memory these objects reside and so before the
+ * batchbuffer is passed to the GPU for execution, those addresses in the
+ * batchbuffer and auxiliary objects are updated. This is known as relocation,
+ * or patching. To try and avoid having to relocate each object on the next
+ * execution, userspace is told the location of those objects in this pass,
+ * but this remains just a hint as the kernel may choose a new location for
+ * any object in the future.
+ *
+ * Processing an execbuf ioctl is conceptually split up into a few phases.
+ *
+ * 1. Validation - Ensure all the pointers, handles and flags are valid.
+ * 2. Reservation - Assign GPU address space for every object
+ * 3. Relocation - Update any addresses to point to the final locations
+ * 4. Serialisation - Order the request with respect to its dependencies
+ * 5. Construction - Construct a request to execute the batchbuffer
+ * 6. Submission (at some point in the future execution)
+ *
+ * Reserving resources for the execbuf is the most complicated phase. We
+ * neither want to have to migrate the object in the address space, nor do
+ * we want to have to update any relocations pointing to this object. Ideally,
+ * we want to leave the object where it is and for all the existing relocations
+ * to match. If the object is given a new address, or if userspace thinks the
+ * object is elsewhere, we have to parse all the relocation entries and update
+ * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
+ * all the target addresses in all of its objects match the value in the
+ * relocation entries and that they all match the presumed offsets given by the
+ * list of execbuffer objects. Using this knowledge, we know that if we haven't
+ * moved any buffers, all the relocation entries are valid and we can skip
+ * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
+ * hang.) The requirements for using I915_EXEC_NO_RELOC are:
+ *
+ *      The addresses written in the objects must match the corresponding
+ *      reloc.presumed_offset which in turn must match the corresponding
+ *      execobject.offset.
+ *
+ *      Any render targets written to in the batch must be flagged with
+ *      EXEC_OBJECT_WRITE.
+ *
+ *      To avoid stalling, execobject.offset should match the current
+ *      address of that object within the active context.
+ *
+ * The reservation is done in multiple phases. First we try and keep any
+ * object already bound in its current location - so long as it meets the
+ * constraints imposed by the new execbuffer. Any object left unbound after the
+ * first pass is then fitted into any available idle space. If an object does
+ * not fit, all objects are removed from the reservation and the process rerun
+ * after sorting the objects into a priority order (more difficult to fit
+ * objects are tried first). Failing that, the entire VM is cleared and we try
+ * to fit the execbuf one last time before concluding that it simply will not
+ * fit.
+ *
+ * A small complication to all of this is that we allow userspace not only to
+ * specify an alignment and a size for the object in the address space, but
+ * we also allow userspace to specify the exact offset. These objects are
+ * simpler to place (the location is known a priori); all we have to do is make
+ * sure the space is available.
+ *
+ * Once all the objects are in place, patching up the buried pointers to point
+ * to the final locations is a fairly simple job of walking over the relocation
+ * entry arrays, looking up the right address and rewriting the value into
+ * the object. Simple! ... The relocation entries are stored in user memory
+ * and so to access them we have to copy them into a local buffer. That copy
+ * has to avoid taking any pagefaults as they may lead back to a GEM object
+ * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
+ * the relocation into multiple passes. First we try to do everything within an
+ * atomic context (avoid the pagefaults) which requires that we never wait. If
+ * we detect that we may wait, or if we need to fault, then we have to fallback
+ * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
+ * bells yet?) Dropping the mutex means that we lose all the state we have
+ * built up so far for the execbuf and we must reset any global data. However,
+ * we do leave the objects pinned in their final locations - which is a
+ * potential issue for concurrent execbufs. Once we have left the mutex, we can
+ * allocate and copy all the relocation entries into a large array at our
+ * leisure, reacquire the mutex, reclaim all the objects and other state and
+ * then proceed to update any incorrect addresses with the objects.
+ *
+ * As we process the relocation entries, we maintain a record of whether the
+ * object is being written to. Using NORELOC, we expect userspace to provide
+ * this information instead. We also check whether we can skip the relocation
+ * by comparing the expected value inside the relocation entry with the target's
+ * final address. If they differ, we have to map the current object and rewrite
+ * the 4 or 8 byte pointer within.
+ *
+ * Serialising an execbuf is quite simple according to the rules of the GEM
+ * ABI. Execution within each context is ordered by the order of submission.
+ * Writes to any GEM object are in order of submission and are exclusive. Reads
+ * from a GEM object are unordered with respect to other reads, but ordered by
+ * writes. A write submitted after a read cannot occur before the read, and
+ * similarly any read submitted after a write cannot occur before the write.
+ * Writes are ordered between engines such that only one write occurs at any
+ * time (completing any reads beforehand) - using semaphores where available
+ * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
+ * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
+ * reads before starting, and any read (either using set-domain or pread) must
+ * flush all GPU writes before starting. (Note we only employ a barrier before,
+ * we currently rely on userspace not concurrently starting a new execution
+ * whilst reading or writing to an object. This may be an advantage or not
+ * depending on how much you trust userspace not to shoot themselves in the
+ * foot.) Serialisation may just result in the request being inserted into
+ * a DAG awaiting its turn, but most simple is to wait on the CPU until
+ * all dependencies are resolved.
+ *
+ * After all of that, it is just a matter of closing the request and handing it to
+ * the hardware (well, leaving it in a queue to be executed). However, we also
+ * offer the ability for batchbuffers to be run with elevated privileges so
+ * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
+ * Before any batch is given extra privileges we first must check that it
+ * contains no nefarious instructions, we check that each instruction is from
+ * our whitelist and all registers are also from an allowed list. We first
+ * copy the user's batchbuffer to a shadow (so that the user doesn't have
+ * access to it, either by the CPU or GPU as we scan it) and then parse each
+ * instruction. If everything is ok, we set a flag telling the hardware to run
+ * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
+ */
+
+struct i915_execbuffer {
+	struct drm_i915_private *i915; /** i915 backpointer */
+	struct drm_file *file; /** per-file lookup tables and limits */
+	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
+	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
+
+	struct intel_engine_cs *engine; /** engine to queue the request to */
+	struct i915_gem_context *ctx; /** context for building the request */
+	struct i915_address_space *vm; /** GTT and vma for the request */
+
+	struct drm_i915_gem_request *request; /** our request to build */
+	struct i915_vma *batch; /** identity of the batch obj/vma */
+
+	/** actual size of execobj[] as we may extend it for the cmdparser */
+	unsigned int buffer_count;
 
-	struct drm_i915_private *i915;
-	struct list_head vmas;
-	int and;
-	union {
-		struct i915_vma *lut[0];
-		struct hlist_head buckets[0];
-	};
+	/** list of vma not yet bound during reservation phase */
+	struct list_head unbound;
+
+	/** list of vma that have execobj.relocation_count */
+	struct list_head relocs;
+
+	/**
+	 * Track the most recently used object for relocations, as we
+	 * frequently have to perform multiple relocations within the same
+	 * obj/page
+	 */
+	struct reloc_cache {
+		struct drm_mm_node node; /** temporary GTT binding */
+		unsigned long vaddr; /** Current kmap address */
+		unsigned long page; /** Currently mapped page index */
+		unsigned int gen; /** Cached value of INTEL_GEN */
+		bool use_64bit_reloc : 1;
+		bool has_llc : 1;
+		bool has_fence : 1;
+		bool needs_unfenced : 1;
+
+		struct drm_i915_gem_request *rq;
+		u32 *rq_cmd;
+		unsigned int rq_size;
+	} reloc_cache;
+
+	u64 invalid_flags; /** Set of execobj.flags that are invalid */
+	u32 context_flags; /** Set of execobj.flags to insert from the ctx */
+
+	u32 batch_start_offset; /** Location within object of batch */
+	u32 batch_len; /** Length of batch within object */
+	u32 batch_flags; /** Flags composed for emit_bb_start() */
+
+	/**
+	 * Indicates either the size of the hashtable used to resolve
+	 * relocation handles, or if negative that we are using a direct
+	 * index into the execobj[].
+	 */
+	int lut_size;
+	struct hlist_head *buckets; /** ht for relocation handles */
 };

-static struct eb_vmas *
-eb_create(struct drm_i915_private *i915,
-	  struct drm_i915_gem_execbuffer2 *args)
+/*
+ * As an alternative to creating a hashtable of handle-to-vma for a batch,
+ * we use the last available reserved field in the execobject[] and stash
+ * a link from the execobj to its vma.
+ */
+#define __exec_to_vma(ee) (ee)->rsvd2
+#define exec_to_vma(ee) u64_to_ptr(struct i915_vma, __exec_to_vma(ee))
+
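A standalone illustration of this stash (the fake_* types below are hypothetical stand-ins for the real execobject and vma; the kernel uses its u64_to_ptr() helper instead of the bare casts):

#include <assert.h>
#include <stdint.h>

struct fake_vma { int dummy; };
struct fake_execobj { uint64_t rsvd2; }; /* mirrors execobject.rsvd2 */

int main(void)
{
	struct fake_vma vma;
	struct fake_execobj ee = { .rsvd2 = (uintptr_t)&vma };

	/* the pointer round-trips exactly through the u64 reserved field */
	assert((struct fake_vma *)(uintptr_t)ee.rsvd2 == &vma);
	return 0;
}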
+/*
+ * Used to convert any address to canonical form.
+ * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+ * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
+ * addresses to be in a canonical form:
+ * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
+ * canonical form [63:48] == [47]."
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static inline u64 gen8_canonical_addr(u64 address)
 {
-	struct eb_vmas *eb = NULL;
-
-	if (args->flags & I915_EXEC_HANDLE_LUT) {
-		unsigned size = args->buffer_count;
-		size *= sizeof(struct i915_vma *);
-		size += sizeof(struct eb_vmas);
-		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-	}
-
-	if (eb == NULL) {
-		unsigned size = args->buffer_count;
-		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
-		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
-		while (count > 2*size)
-			count >>= 1;
-		eb = kzalloc(count*sizeof(struct hlist_head) +
-			     sizeof(struct eb_vmas),
-			     GFP_TEMPORARY);
-		if (eb == NULL)
-			return eb;
-
-		eb->and = count - 1;
-	} else
-		eb->and = -args->buffer_count;
+	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
+}

-	eb->i915 = i915;
-	INIT_LIST_HEAD(&eb->vmas);
-	return eb;
+static inline u64 gen8_noncanonical_addr(u64 address)
+{
+	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
 }

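A worked example of the round-trip (a sketch using plain shifts in place of the kernel's sign_extend64(), and assuming the usual arithmetic right shift on signed types): bit 47 is replicated into bits [63:48].

#include <assert.h>
#include <stdint.h>

#define HIGH_BIT 47

static uint64_t canonical(uint64_t addr)
{
	/* shift bit 47 up to the sign bit, then arithmetic-shift back */
	return (uint64_t)((int64_t)(addr << (63 - HIGH_BIT)) >> (63 - HIGH_BIT));
}

int main(void)
{
	assert(canonical(0x0000800000000000ull) == 0xffff800000000000ull);
	assert(canonical(0x00007fffffffffffull) == 0x00007fffffffffffull);
	return 0;
}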
-static void
-eb_reset(struct eb_vmas *eb)
+static int eb_create(struct i915_execbuffer *eb)
 {
-	if (eb->and >= 0)
-		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
+		unsigned int size = 1 + ilog2(eb->buffer_count);
+
+		/*
+		 * Without a 1:1 association between relocation handles and
+		 * the execobject[] index, we instead create a hashtable.
+		 * We size it dynamically based on available memory, starting
+		 * first with a 1:1 associative hash and scaling back until
+		 * the allocation succeeds.
+		 *
+		 * Later on we use a positive lut_size to indicate we are
+		 * using this hashtable, and a negative value to indicate a
+		 * direct lookup.
+		 */
+		do {
+			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
+					      GFP_TEMPORARY |
+					      __GFP_NORETRY |
+					      __GFP_NOWARN);
+			if (eb->buckets)
+				break;
+		} while (--size);
+
+		if (unlikely(!eb->buckets)) {
+			eb->buckets = kzalloc(sizeof(struct hlist_head),
+					      GFP_TEMPORARY);
+			if (unlikely(!eb->buckets))
+				return -ENOMEM;
+		}
+
+		eb->lut_size = size;
+	} else {
+		eb->lut_size = -eb->buffer_count;
+	}
+
+	return 0;
 }

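A userspace sketch of that sizing policy (ilog2u() is a stand-in for the kernel's ilog2()): 100 buffers start at 2^7 = 128 buckets and the table is halved on each allocation failure; the kernel additionally falls back to a single bucket if even the smallest size fails.

#include <stdio.h>
#include <stdlib.h>

static unsigned int ilog2u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int count = 100;
	unsigned int size = 1 + ilog2u(count); /* 7 -> 128 buckets */
	void *buckets;

	do { /* scale back until the allocation succeeds */
		buckets = calloc(1u << size, sizeof(void *));
	} while (!buckets && --size);

	printf("allocated %u buckets\n", 1u << size);
	free(buckets);
	return 0;
}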
-static struct i915_vma *
-eb_get_batch(struct eb_vmas *eb)
+static bool
+eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
+		 const struct i915_vma *vma)
+{
+	if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
+		return true;
+
+	if (vma->node.size < entry->pad_to_size)
+		return true;
+
+	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
+		return true;
+
+	if (entry->flags & EXEC_OBJECT_PINNED &&
+	    vma->node.start != entry->offset)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
+	    (vma->node.start + vma->node.size - 1) >> 32)
+		return true;
+
+	return false;
+}
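The 48b-address test above can be checked in isolation; a small sketch with made-up node values: an object that ends above 4GiB is misplaced unless EXEC_OBJECT_SUPPORTS_48B_ADDRESS was set.

#include <assert.h>
#include <stdint.h>

static int ends_above_4g(uint64_t start, uint64_t size)
{
	return ((start + size - 1) >> 32) != 0;
}

int main(void)
{
	assert(!ends_above_4g(0xfffff000, 0x1000)); /* ends at 4GiB - 1 */
	assert(ends_above_4g(0xfffff000, 0x2000));  /* crosses 4GiB */
	return 0;
}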
+
+static inline void
+eb_pin_vma(struct i915_execbuffer *eb,
+	   struct drm_i915_gem_exec_object2 *entry,
+	   struct i915_vma *vma)
+{
+	u64 flags;
+
+	if (vma->node.size)
+		flags = vma->node.start;
+	else
+		flags = entry->offset & PIN_OFFSET_MASK;
+
+	flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
+	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_GTT))
+		flags |= PIN_GLOBAL;
+
+	if (unlikely(i915_vma_pin(vma, 0, 0, flags)))
+		return;
+
+	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
+		if (unlikely(i915_vma_get_fence(vma))) {
+			i915_vma_unpin(vma);
+			return;
+		}
+
+		if (i915_vma_pin_fence(vma))
+			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+	}
+
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+}
+
+static inline void
+__eb_unreserve_vma(struct i915_vma *vma,
+		   const struct drm_i915_gem_exec_object2 *entry)
+{
+	GEM_BUG_ON(!(entry->flags & __EXEC_OBJECT_HAS_PIN));
+
+	if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
+		i915_vma_unpin_fence(vma);
+
+	__i915_vma_unpin(vma);
+}
+
+static inline void
+eb_unreserve_vma(struct i915_vma *vma,
+		 struct drm_i915_gem_exec_object2 *entry)
+{
+	if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
+		return;
+
+	__eb_unreserve_vma(vma, entry);
+	entry->flags &= ~__EXEC_OBJECT_RESERVED;
+}
+
+static int
+eb_validate_vma(struct i915_execbuffer *eb,
+		struct drm_i915_gem_exec_object2 *entry,
+		struct i915_vma *vma)
 {
-	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+	if (unlikely(entry->flags & eb->invalid_flags))
+		return -EINVAL;
+
+	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+		return -EINVAL;

 	/*
-	 * SNA is doing fancy tricks with compressing batch buffers, which leads
-	 * to negative relocation deltas. Usually that works out ok since the
-	 * relocate address is still positive, except when the batch is placed
-	 * very low in the GTT. Ensure this doesn't happen.
-	 *
-	 * Note that actual hangs have only been observed on gen7, but for
-	 * paranoia do it everywhere.
+	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
+	 * any non-page-aligned or non-canonical addresses.
 	 */
-	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
-		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
+		     entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
+		return -EINVAL;

-	return vma;
+	/* pad_to_size was once a reserved field, so sanitize it */
+	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
+		if (unlikely(offset_in_page(entry->pad_to_size)))
+			return -EINVAL;
+	} else {
+		entry->pad_to_size = 0;
+	}
+
+	if (unlikely(vma->exec_entry)) {
+		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
+			  entry->handle, (int)(entry - eb->exec));
+		return -EINVAL;
+	}
+
+	/*
+	 * From the drm_mm perspective the address space is contiguous,
+	 * so from this point on we always use the non-canonical
+	 * form internally.
+	 */
+	entry->offset = gen8_noncanonical_addr(entry->offset);
+
+	return 0;
 }

 static int
-eb_lookup_vmas(struct eb_vmas *eb,
-	       struct drm_i915_gem_exec_object2 *exec,
-	       const struct drm_i915_gem_execbuffer2 *args,
-	       struct i915_address_space *vm,
-	       struct drm_file *file)
+eb_add_vma(struct i915_execbuffer *eb,
+	   struct drm_i915_gem_exec_object2 *entry,
+	   struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj;
-	struct list_head objects;
-	int i, ret;
+	int err;

-	INIT_LIST_HEAD(&objects);
-	spin_lock(&file->table_lock);
-	/* Grab a reference to the object and release the lock so we can lookup
-	 * or create the VMA without using GFP_ATOMIC */
-	for (i = 0; i < args->buffer_count; i++) {
-		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
-		if (obj == NULL) {
-			spin_unlock(&file->table_lock);
-			DRM_DEBUG("Invalid object handle %d at index %d\n",
-				   exec[i].handle, i);
-			ret = -ENOENT;
-			goto err;
+	GEM_BUG_ON(i915_vma_is_closed(vma));
+
+	if (!(eb->args->flags & __EXEC_VALIDATED)) {
+		err = eb_validate_vma(eb, entry, vma);
+		if (unlikely(err))
+			return err;
+	}
+
+	if (eb->lut_size >= 0) {
+		vma->exec_handle = entry->handle;
+		hlist_add_head(&vma->exec_node,
+			       &eb->buckets[hash_32(entry->handle,
+						    eb->lut_size)]);
+	}
+
+	if (entry->relocation_count)
+		list_add_tail(&vma->reloc_link, &eb->relocs);
+
+	if (!eb->reloc_cache.has_fence) {
+		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
+	} else {
+		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+		     eb->reloc_cache.needs_unfenced) &&
+		    i915_gem_object_is_tiled(vma->obj))
+			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
+	}
+
+	if (!(entry->flags & EXEC_OBJECT_PINNED))
+		entry->flags |= eb->context_flags;
+
+	/*
+	 * Stash a pointer from the vma to execobj, so we can query its flags,
+	 * size, alignment etc as provided by the user. Also we stash a pointer
+	 * to the vma inside the execobj so that we can use a direct lookup
+	 * to find the right target VMA when doing relocations.
+	 */
+	vma->exec_entry = entry;
+	__exec_to_vma(entry) = (uintptr_t)vma;
+
+	err = 0;
+	eb_pin_vma(eb, entry, vma);
+	if (eb_vma_misplaced(entry, vma)) {
+		eb_unreserve_vma(vma, entry);
+
+		list_add_tail(&vma->exec_link, &eb->unbound);
+		if (drm_mm_node_allocated(&vma->node))
+			err = i915_vma_unbind(vma);
+	} else {
+		if (entry->offset != vma->node.start) {
+			entry->offset = vma->node.start | UPDATE;
+			eb->args->flags |= __EXEC_HAS_RELOC;
+		}
+	}
+	return err;
+}
+
+static inline int use_cpu_reloc(const struct reloc_cache *cache,
+				const struct drm_i915_gem_object *obj)
+{
+	if (!i915_gem_object_has_struct_page(obj))
+		return false;
+
+	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
+		return true;
+
+	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
+		return false;
+
+	return (cache->has_llc ||
+		obj->cache_dirty ||
+		obj->cache_level != I915_CACHE_NONE);
+}
+
+static int eb_reserve_vma(const struct i915_execbuffer *eb,
+			  struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	u64 flags;
+	int err;
+
+	flags = PIN_USER | PIN_NONBLOCK;
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+		flags |= PIN_GLOBAL;
+
+	/*
+	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
+	 * limit address to the first 4GBs for unflagged objects.
+	 */
+	if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+		flags |= PIN_ZONE_4G;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+		flags |= PIN_MAPPABLE;
+
+	if (entry->flags & EXEC_OBJECT_PINNED) {
+		flags |= entry->offset | PIN_OFFSET_FIXED;
+		flags &= ~PIN_NONBLOCK; /* force overlapping PINNED checks */
+	} else if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) {
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+	}
+
+	err = i915_vma_pin(vma, entry->pad_to_size, entry->alignment, flags);
+	if (err)
+		return err;
+
+	if (entry->offset != vma->node.start) {
+		entry->offset = vma->node.start | UPDATE;
+		eb->args->flags |= __EXEC_HAS_RELOC;
+	}
+
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+	GEM_BUG_ON(eb_vma_misplaced(entry, vma));
+
+	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
+		err = i915_vma_get_fence(vma);
+		if (unlikely(err)) {
+			i915_vma_unpin(vma);
+			return err;
+		}
+
+		if (i915_vma_pin_fence(vma))
+			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+	}
+
+	return 0;
+}
+
+static int eb_reserve(struct i915_execbuffer *eb)
+{
+	const unsigned int count = eb->buffer_count;
+	struct list_head last;
+	struct i915_vma *vma;
+	unsigned int i, pass;
+	int err;
+
+	/*
+	 * Attempt to pin all of the buffers into the GTT.
+	 * This is done in 3 phases:
+	 *
+	 * 1a. Unbind all objects that do not match the GTT constraints for
+	 *     the execbuffer (fenceable, mappable, alignment etc).
+	 * 1b. Increment pin count for already bound objects.
+	 * 2.  Bind new objects.
+	 * 3.  Decrement pin count.
+	 *
+	 * This avoids unnecessary unbinding of later objects in order to make
+	 * room for the earlier objects *unless* we need to defragment.
+	 */
+
+	pass = 0;
+	err = 0;
+	do {
+		list_for_each_entry(vma, &eb->unbound, exec_link) {
+			err = eb_reserve_vma(eb, vma);
+			if (err)
+				break;
+		}
+		if (err != -ENOSPC)
+			return err;
+
+		/* Resort *all* the objects into priority order */
+		INIT_LIST_HEAD(&eb->unbound);
+		INIT_LIST_HEAD(&last);
+		for (i = 0; i < count; i++) {
+			struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+
+			if (entry->flags & EXEC_OBJECT_PINNED &&
+			    entry->flags & __EXEC_OBJECT_HAS_PIN)
+				continue;
+
+			vma = exec_to_vma(entry);
+			eb_unreserve_vma(vma, entry);
+
+			if (entry->flags & EXEC_OBJECT_PINNED)
+				list_add(&vma->exec_link, &eb->unbound);
+			else if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+				list_add_tail(&vma->exec_link, &eb->unbound);
+			else
+				list_add_tail(&vma->exec_link, &last);
+		}
+		list_splice_tail(&last, &eb->unbound);
+
+		switch (pass++) {
+		case 0:
+			break;
+
+		case 1:
+			/* Too fragmented, unbind everything and retry */
+			err = i915_gem_evict_vm(eb->vm);
+			if (err)
+				return err;
+			break;
+
+		default:
+			return -ENOSPC;
+		}
+	} while (1);
+}
+
+static inline struct hlist_head *
+ht_head(const struct i915_gem_context_vma_lut *lut, u32 handle)
+{
+	return &lut->ht[hash_32(handle, lut->ht_bits)];
+}
+
+static inline bool
+ht_needs_resize(const struct i915_gem_context_vma_lut *lut)
+{
+	return (4*lut->ht_count > 3*lut->ht_size ||
+		4*lut->ht_count + 1 < lut->ht_size);
+}
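In plain numbers, those thresholds mean the table grows once the load factor exceeds 3/4 and shrinks once it drops below roughly 1/4; a quick standalone check:

#include <assert.h>

static int needs_resize(unsigned int count, unsigned int size)
{
	return 4 * count > 3 * size || 4 * count + 1 < size;
}

int main(void)
{
	assert(!needs_resize(96, 128)); /* exactly 0.75: keep */
	assert(needs_resize(97, 128));  /* above 0.75: grow */
	assert(needs_resize(31, 128));  /* 4*31+1 = 125 < 128: shrink */
	assert(!needs_resize(32, 128)); /* 4*32+1 = 129: keep */
	return 0;
}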
+
+static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
+{
+	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
+		return 0;
+	else
+		return eb->buffer_count - 1;
+}
+
+static int eb_select_context(struct i915_execbuffer *eb)
+{
+	struct i915_gem_context *ctx;
+
+	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
+	if (unlikely(IS_ERR(ctx)))
+		return PTR_ERR(ctx);
+
+	if (unlikely(i915_gem_context_is_banned(ctx))) {
+		DRM_DEBUG("Context %u tried to submit while banned\n",
+			  ctx->user_handle);
+		return -EIO;
+	}
+
+	eb->ctx = i915_gem_context_get(ctx);
+	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+
+	eb->context_flags = 0;
+	if (ctx->flags & CONTEXT_NO_ZEROMAP)
+		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return 0;
+}
+
+static int eb_lookup_vmas(struct i915_execbuffer *eb)
+{
+#define INTERMEDIATE BIT(0)
+	const unsigned int count = eb->buffer_count;
+	struct i915_gem_context_vma_lut *lut = &eb->ctx->vma_lut;
+	struct i915_vma *vma;
+	struct idr *idr;
+	unsigned int i;
+	int slow_pass = -1;
+	int err;
+
+	INIT_LIST_HEAD(&eb->relocs);
+	INIT_LIST_HEAD(&eb->unbound);
+
+	if (unlikely(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS))
+		flush_work(&lut->resize);
+	GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
+
+	for (i = 0; i < count; i++) {
+		__exec_to_vma(&eb->exec[i]) = 0;
+
+		hlist_for_each_entry(vma,
+				     ht_head(lut, eb->exec[i].handle),
+				     ctx_node) {
+			if (vma->ctx_handle != eb->exec[i].handle)
+				continue;
+
+			err = eb_add_vma(eb, &eb->exec[i], vma);
+			if (unlikely(err))
+				return err;
+
+			goto next_vma;
 		}

-		if (!list_empty(&obj->obj_exec_link)) {
-			spin_unlock(&file->table_lock);
-			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
-				   obj, exec[i].handle, i);
-			ret = -EINVAL;
+		if (slow_pass < 0)
+			slow_pass = i;
+next_vma: ;
+	}
+
+	if (slow_pass < 0)
+		goto out;
+
+	spin_lock(&eb->file->table_lock);
+	/*
+	 * Grab a reference to the object and release the lock so we can lookup
+	 * or create the VMA without using GFP_ATOMIC
+	 */
+	idr = &eb->file->object_idr;
+	for (i = slow_pass; i < count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		if (__exec_to_vma(&eb->exec[i]))
+			continue;
+
+		obj = to_intel_bo(idr_find(idr, eb->exec[i].handle));
+		if (unlikely(!obj)) {
+			spin_unlock(&eb->file->table_lock);
+			DRM_DEBUG("Invalid object handle %d at index %d\n",
+				  eb->exec[i].handle, i);
+			err = -ENOENT;
 			goto err;
 		}

-		i915_gem_object_get(obj);
-		list_add_tail(&obj->obj_exec_link, &objects);
+		__exec_to_vma(&eb->exec[i]) = INTERMEDIATE | (uintptr_t)obj;
 	}
-	spin_unlock(&file->table_lock);
+	spin_unlock(&eb->file->table_lock);

-	i = 0;
-	while (!list_empty(&objects)) {
-		struct i915_vma *vma;
+	for (i = slow_pass; i < count; i++) {
+		struct drm_i915_gem_object *obj;

-		obj = list_first_entry(&objects,
-				       struct drm_i915_gem_object,
-				       obj_exec_link);
+		if (!(__exec_to_vma(&eb->exec[i]) & INTERMEDIATE))
+			continue;

 		/*
 		 * NOTE: We can leak any vmas created here when something fails
@@ -186,59 +769,93 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		 * from the (obj, vm) we don't run the risk of creating
 		 * duplicated vmas for the same vm.
 		 */
-		vma = i915_vma_instance(obj, vm, NULL);
+		obj = u64_to_ptr(typeof(*obj),
+				 __exec_to_vma(&eb->exec[i]) & ~INTERMEDIATE);
+		vma = i915_vma_instance(obj, eb->vm, NULL);
 		if (unlikely(IS_ERR(vma))) {
 			DRM_DEBUG("Failed to lookup VMA\n");
-			ret = PTR_ERR(vma);
+			err = PTR_ERR(vma);
 			goto err;
 		}

-		/* Transfer ownership from the objects list to the vmas list. */
-		list_add_tail(&vma->exec_list, &eb->vmas);
-		list_del_init(&obj->obj_exec_link);
+		/* First come, first served */
+		if (!vma->ctx) {
+			vma->ctx = eb->ctx;
+			vma->ctx_handle = eb->exec[i].handle;
+			hlist_add_head(&vma->ctx_node,
+				       ht_head(lut, eb->exec[i].handle));
+			lut->ht_count++;
+			lut->ht_size |= I915_CTX_RESIZE_IN_PROGRESS;
+			if (i915_vma_is_ggtt(vma)) {
+				GEM_BUG_ON(obj->vma_hashed);
+				obj->vma_hashed = vma;
+			}

-		vma->exec_entry = &exec[i];
-		if (eb->and < 0) {
-			eb->lut[i] = vma;
-		} else {
-			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-			vma->exec_handle = handle;
-			hlist_add_head(&vma->exec_node,
-				       &eb->buckets[handle & eb->and]);
+			i915_vma_get(vma);
+		}
+
+		err = eb_add_vma(eb, &eb->exec[i], vma);
+		if (unlikely(err))
+			goto err;
+
+		/* Only after we validated the user didn't use our bits */
+		if (vma->ctx != eb->ctx) {
+			i915_vma_get(vma);
+			eb->exec[i].flags |= __EXEC_OBJECT_HAS_REF;
 		}
-		++i;
 	}

-	return 0;
+	if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
+		if (ht_needs_resize(lut))
+			queue_work(system_highpri_wq, &lut->resize);
+		else
+			lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
+	}
+
+out:
+	/* take note of the batch buffer before we might reorder the lists */
+	i = eb_batch_index(eb);
+	eb->batch = exec_to_vma(&eb->exec[i]);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	if (!(eb->exec[i].flags & EXEC_OBJECT_PINNED))
+		eb->exec[i].flags |= __EXEC_OBJECT_NEEDS_BIAS;
+	if (eb->reloc_cache.has_fence)
+		eb->exec[i].flags |= EXEC_OBJECT_NEEDS_FENCE;
 
 
+	eb->args->flags |= __EXEC_VALIDATED;
+	return eb_reserve(eb);

 err:
-	while (!list_empty(&objects)) {
-		obj = list_first_entry(&objects,
-				       struct drm_i915_gem_object,
-				       obj_exec_link);
-		list_del_init(&obj->obj_exec_link);
-		i915_gem_object_put(obj);
+	for (i = slow_pass; i < count; i++) {
+		if (__exec_to_vma(&eb->exec[i]) & INTERMEDIATE)
+			__exec_to_vma(&eb->exec[i]) = 0;
 	}
-	/*
-	 * Objects already transfered to the vmas list will be unreferenced by
-	 * eb_destroy.
-	 */
-
-	return ret;
+	lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
+	return err;
+#undef INTERMEDIATE
 }

-static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
+static struct i915_vma *
+eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
 {
-	if (eb->and < 0) {
-		if (handle >= -eb->and)
+	if (eb->lut_size < 0) {
+		if (handle >= -eb->lut_size)
 			return NULL;
-		return eb->lut[handle];
+		return exec_to_vma(&eb->exec[handle]);
 	} else {
 		struct hlist_head *head;
 		struct i915_vma *vma;

-		head = &eb->buckets[handle & eb->and];
+		head = &eb->buckets[hash_32(handle, eb->lut_size)];
 		hlist_for_each_entry(vma, head, exec_node) {
 			if (vma->exec_handle == handle)
 				return vma;
@@ -247,96 +864,69 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }

-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+static void eb_release_vmas(const struct i915_execbuffer *eb)
 {
-	struct drm_i915_gem_exec_object2 *entry;
+	const unsigned int count = eb->buffer_count;
+	unsigned int i;

-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+		struct i915_vma *vma = exec_to_vma(entry);

-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_vma_unpin_fence(vma);
+		if (!vma)
+			continue;

-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		__i915_vma_unpin(vma);
+		GEM_BUG_ON(vma->exec_entry != entry);
+		vma->exec_entry = NULL;

-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
+		if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+			__eb_unreserve_vma(vma, entry);

-static void eb_destroy(struct eb_vmas *eb)
-{
-	while (!list_empty(&eb->vmas)) {
-		struct i915_vma *vma;
+		if (entry->flags & __EXEC_OBJECT_HAS_REF)
+			i915_vma_put(vma);

-		vma = list_first_entry(&eb->vmas,
-				       struct i915_vma,
-				       exec_list);
-		list_del_init(&vma->exec_list);
-		i915_gem_execbuffer_unreserve_vma(vma);
-		vma->exec_entry = NULL;
-		i915_vma_put(vma);
+		entry->flags &=
+			~(__EXEC_OBJECT_RESERVED | __EXEC_OBJECT_HAS_REF);
 	}
-	kfree(eb);
 }

-static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+static void eb_reset_vmas(const struct i915_execbuffer *eb)
 {
-	if (!i915_gem_object_has_struct_page(obj))
-		return false;
-
-	if (DBG_USE_CPU_RELOC)
-		return DBG_USE_CPU_RELOC > 0;
-
-	return (HAS_LLC(to_i915(obj->base.dev)) ||
-		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
-		obj->cache_level != I915_CACHE_NONE);
+	eb_release_vmas(eb);
+	if (eb->lut_size >= 0)
+		memset(eb->buckets, 0,
+		       sizeof(struct hlist_head) << eb->lut_size);
 }
 }

- * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
- * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
- * addresses to be in a canonical form:
- * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
- * canonical form [63:48] == [47]."
- */
-#define GEN8_HIGH_ADDRESS_BIT 47
-static inline uint64_t gen8_canonical_addr(uint64_t address)
+static void eb_destroy(const struct i915_execbuffer *eb)
 {
-	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
-}
+	GEM_BUG_ON(eb->reloc_cache.rq);

-static inline uint64_t gen8_noncanonical_addr(uint64_t address)
-{
-	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
+	if (eb->lut_size >= 0)
+		kfree(eb->buckets);
 }

-static inline uint64_t
+static inline u64
 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
-		  uint64_t target_offset)
+		  const struct i915_vma *target)
 {
-	return gen8_canonical_addr((int)reloc->delta + target_offset);
+	return gen8_canonical_addr((int)reloc->delta + target->node.start);
 }

-struct reloc_cache {
-	struct drm_i915_private *i915;
-	struct drm_mm_node node;
-	unsigned long vaddr;
-	unsigned int page;
-	bool use_64bit_reloc;
-};
-
 static void reloc_cache_init(struct reloc_cache *cache,
 			     struct drm_i915_private *i915)
 {
 	cache->page = -1;
 	cache->vaddr = 0;
-	cache->i915 = i915;
 	/* Must be a variable in the struct to allow GCC to unroll. */
+	cache->gen = INTEL_GEN(i915);
+	cache->has_llc = HAS_LLC(i915);
 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
+	cache->has_fence = cache->gen < 4;
+	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
 	cache->node.allocated = false;
+	cache->rq = NULL;
+	cache->rq_size = 0;
 }

 static inline void *unmask_page(unsigned long p)
@@ -351,10 +941,31 @@ static inline unsigned int unmask_flags(unsigned long p)

 #define KMAP 0x4 /* after CLFLUSH_FLAGS */

-static void reloc_cache_fini(struct reloc_cache *cache)
+static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
+{
+	struct drm_i915_private *i915 =
+		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
+	return &i915->ggtt;
+}
+
+static void reloc_gpu_flush(struct reloc_cache *cache)
+{
+	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
+	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(cache->rq->batch->obj);
+	i915_gem_chipset_flush(cache->rq->i915);
+
+	__i915_add_request(cache->rq, true);
+	cache->rq = NULL;
+}
+
+static void reloc_cache_reset(struct reloc_cache *cache)
 {
 	void *vaddr;

+	if (cache->rq)
+		reloc_gpu_flush(cache);
+
 	if (!cache->vaddr)
 		return;

@@ -369,7 +980,7 @@ static void reloc_cache_fini(struct reloc_cache *cache)
 		wmb();
 		io_mapping_unmap_atomic((void __iomem *)vaddr);
 		if (cache->node.allocated) {
-			struct i915_ggtt *ggtt = &cache->i915->ggtt;
+			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

 			ggtt->base.clear_range(&ggtt->base,
 					       cache->node.start,
@@ -379,11 +990,14 @@ static void reloc_cache_fini(struct reloc_cache *cache)
 			i915_vma_unpin((struct i915_vma *)cache->node.mm);
 		}
 	}
+
+	cache->vaddr = 0;
+	cache->page = -1;
 }
 }

 static void *reloc_kmap(struct drm_i915_gem_object *obj,
 			struct reloc_cache *cache,
+			unsigned long page)
 {
 {
 	void *vaddr;

 		kunmap_atomic(unmask_page(cache->vaddr));
 		kunmap_atomic(unmask_page(cache->vaddr));
 	} else {
 		unsigned int flushes;
+		int err;
 
 
-		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
-		if (ret)
-			return ERR_PTR(ret);
+		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
+		if (err)
+			return ERR_PTR(err);
 
 
 		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
 		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
 		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
 		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
@@ -415,9 +1029,9 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 
 
 static void *reloc_iomap(struct drm_i915_gem_object *obj,
 static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
 			 struct reloc_cache *cache,
-			 int page)
+			 unsigned long page)
 {
 {
+	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 	unsigned long offset;
 	unsigned long offset;
 	void *vaddr;

 		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
 		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
 	} else {
 		struct i915_vma *vma;
+		int err;
 
 
-		if (use_cpu_reloc(obj))
+		if (use_cpu_reloc(cache, obj))
 			return NULL;
 			return NULL;

-		if (ret)
-			return ERR_PTR(ret);
+		err = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (err)
+			return ERR_PTR(err);
 
 
 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
 					       PIN_MAPPABLE | PIN_NONBLOCK);
 		if (IS_ERR(vma)) {
 		if (IS_ERR(vma)) {
 			memset(&cache->node, 0, sizeof(cache->node));
 			memset(&cache->node, 0, sizeof(cache->node));
-			ret = drm_mm_insert_node_in_range
+			err = drm_mm_insert_node_in_range
 				(&ggtt->base.mm, &cache->node,
 				(&ggtt->base.mm, &cache->node,
 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
 				 0, ggtt->mappable_end,
 				 DRM_MM_INSERT_LOW);
 				 DRM_MM_INSERT_LOW);
-			if (ret) /* no inactive aperture space, use cpu reloc */
+			if (err) /* no inactive aperture space, use cpu reloc */
 				return NULL;
 				return NULL;
 		} else {
-			if (ret) {
+			err = i915_vma_put_fence(vma);
+			if (err) {
 				i915_vma_unpin(vma);
 				i915_vma_unpin(vma);
+				return ERR_PTR(err);
 			}
 			}

 			cache->node.start = vma->node.start;
@@ -467,7 +1081,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		offset += page << PAGE_SHIFT;
 	}

-	vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
+							 offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;

@@ -476,7 +1091,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,

 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
-			 int page)
+			 unsigned long page)
 {
 	void *vaddr;

@@ -503,7 +1118,8 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)

 		*addr = value;

-		/* Writes to the same cacheline are serialised by the CPU
+		/*
+		 * Writes to the same cacheline are serialised by the CPU
 		 * (including clflush). On the write path, we only require
 		 * that it hits memory in an orderly fashion and place
 		 * mb barriers at the start and end of the relocation phase
@@ -515,25 +1131,201 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
 		*addr = value;
 }

-static int
-relocate_entry(struct drm_i915_gem_object *obj,
+static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
+			     struct i915_vma *vma,
+			     unsigned int len)
+{
+	struct reloc_cache *cache = &eb->reloc_cache;
+	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_request *rq;
+	struct i915_vma *batch;
+	u32 *cmd;
+	int err;
+
+	GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);
+
+	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	cmd = i915_gem_object_pin_map(obj,
+				      cache->has_llc ? I915_MAP_WB : I915_MAP_WC);
+	i915_gem_object_unpin_pages(obj);
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
+
+	err = i915_gem_object_set_to_wc_domain(obj, false);
+	if (err)
+		goto err_unmap;
+
+	batch = i915_vma_instance(obj, vma->vm, NULL);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto err_unmap;
+	}
+
+	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
+	if (err)
+		goto err_unmap;
+
+	rq = i915_gem_request_alloc(eb->engine, eb->ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_unpin;
+	}
+
+	err = i915_gem_request_await_object(rq, vma->obj, true);
+	if (err)
+		goto err_request;
+
+	err = eb->engine->emit_flush(rq, EMIT_INVALIDATE);
+	if (err)
+		goto err_request;
+
+	err = i915_switch_context(rq);
+	if (err)
+		goto err_request;
+
+	err = eb->engine->emit_bb_start(rq,
+					batch->node.start, PAGE_SIZE,
+					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
+	if (err)
+		goto err_request;
+
+	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
+	i915_vma_move_to_active(batch, rq, 0);
+	reservation_object_lock(batch->resv, NULL);
+	reservation_object_add_excl_fence(batch->resv, &rq->fence);
+	reservation_object_unlock(batch->resv);
+	i915_vma_unpin(batch);
+
+	i915_vma_move_to_active(vma, rq, true);
+	reservation_object_lock(vma->resv, NULL);
+	reservation_object_add_excl_fence(vma->resv, &rq->fence);
+	reservation_object_unlock(vma->resv);
+
+	rq->batch = batch;
+
+	cache->rq = rq;
+	cache->rq_cmd = cmd;
+	cache->rq_size = 0;
+
+	/* Return with batch mapping (cmd) still pinned */
+	return 0;
+
+err_request:
+	i915_add_request(rq);
+err_unpin:
+	i915_vma_unpin(batch);
+err_unmap:
+	i915_gem_object_unpin_map(obj);
+	return err;
+}
+
+static u32 *reloc_gpu(struct i915_execbuffer *eb,
+		      struct i915_vma *vma,
+		      unsigned int len)
+{
+	struct reloc_cache *cache = &eb->reloc_cache;
+	u32 *cmd;
+
+	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
+		reloc_gpu_flush(cache);
+
+	if (unlikely(!cache->rq)) {
+		int err;
+
+		err = __reloc_gpu_alloc(eb, vma, len);
+		if (unlikely(err))
+			return ERR_PTR(err);
+	}
+
+	cmd = cache->rq_cmd + cache->rq_size;
+	cache->rq_size += len;
+
+	return cmd;
+}
+
+static u64
+relocate_entry(struct i915_vma *vma,
 	       const struct drm_i915_gem_relocation_entry *reloc,
-	       struct reloc_cache *cache,
-	       u64 target_offset)
+	       struct i915_execbuffer *eb,
+	       const struct i915_vma *target)
 {
 	u64 offset = reloc->offset;
-	bool wide = cache->use_64bit_reloc;
+	u64 target_offset = relocation_target(reloc, target);
+	bool wide = eb->reloc_cache.use_64bit_reloc;
 	void *vaddr;

-	target_offset = relocation_target(reloc, target_offset);
+	if (!eb->reloc_cache.vaddr &&
+	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
+	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
+		const unsigned int gen = eb->reloc_cache.gen;
+		unsigned int len;
+		u32 *batch;
+		u64 addr;
+
+		if (wide)
+			len = offset & 7 ? 8 : 5;
+		else if (gen >= 4)
+			len = 4;
+		else if (gen >= 3)
+			len = 3;
+		else /* On gen2 MI_STORE_DWORD_IMM uses a physical address */
+			goto repeat;
+
+		batch = reloc_gpu(eb, vma, len);
+		if (IS_ERR(batch))
+			goto repeat;
+
+		addr = gen8_canonical_addr(vma->node.start + offset);
+		if (wide) {
+			if (offset & 7) {
+				*batch++ = MI_STORE_DWORD_IMM_GEN4;
+				*batch++ = lower_32_bits(addr);
+				*batch++ = upper_32_bits(addr);
+				*batch++ = lower_32_bits(target_offset);
+
+				addr = gen8_canonical_addr(addr + 4);
+
+				*batch++ = MI_STORE_DWORD_IMM_GEN4;
+				*batch++ = lower_32_bits(addr);
+				*batch++ = upper_32_bits(addr);
+				*batch++ = upper_32_bits(target_offset);
+			} else {
+				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
+				*batch++ = lower_32_bits(addr);
+				*batch++ = upper_32_bits(addr);
+				*batch++ = lower_32_bits(target_offset);
+				*batch++ = upper_32_bits(target_offset);
+			}
+		} else if (gen >= 6) {
+			*batch++ = MI_STORE_DWORD_IMM_GEN4;
+			*batch++ = 0;
+			*batch++ = addr;
+			*batch++ = target_offset;
+		} else if (gen >= 4) {
+			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+			*batch++ = 0;
+			*batch++ = addr;
+			*batch++ = target_offset;
+		} else {
+			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+			*batch++ = addr;
+			*batch++ = target_offset;
+		}
+
+		goto out;
+	}
+
 repeat:
-	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
+	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);

 	clflush_write32(vaddr + offset_in_page(offset),
 			lower_32_bits(target_offset),
-			cache->vaddr);
+			eb->reloc_cache.vaddr);

 	if (wide) {
 		offset += sizeof(u32);
@@ -542,48 +1334,29 @@ repeat:
 		goto repeat;
 	}

-	return 0;
+out:
+	return target->node.start | UPDATE;
 }

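A sketch of the length selection in relocate_entry() above: a 64-bit relocation that straddles a qword boundary must be split into two dword MI_STORE_DWORD_IMM writes (8 dwords total), while an aligned one fits in a single qword store (5 dwords); gen2 has no suitable command and falls back to the CPU path.

#include <assert.h>
#include <stdint.h>

static unsigned int reloc_len(uint64_t offset, int wide, int gen)
{
	if (wide)
		return offset & 7 ? 8 : 5; /* split vs single qword store */
	if (gen >= 4)
		return 4;
	return 3; /* gen3; gen2 uses the CPU relocation path instead */
}

int main(void)
{
	assert(reloc_len(0x1004, 1, 8) == 8); /* misaligned 64b: two writes */
	assert(reloc_len(0x1000, 1, 8) == 5); /* aligned 64b: one write */
	assert(reloc_len(0x1000, 0, 6) == 4); /* 32b on gen4+ */
	return 0;
}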
-static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct eb_vmas *eb,
-				   struct drm_i915_gem_relocation_entry *reloc,
-				   struct reloc_cache *cache)
+static u64
+eb_relocate_entry(struct i915_execbuffer *eb,
+		  struct i915_vma *vma,
+		  const struct drm_i915_gem_relocation_entry *reloc)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct drm_gem_object *target_obj;
-	struct drm_i915_gem_object *target_i915_obj;
-	struct i915_vma *target_vma;
-	uint64_t target_offset;
-	int ret;
+	struct i915_vma *target;
+	int err;

 	/* we already hold a reference to all valid objects */
-	target_vma = eb_get_vma(eb, reloc->target_handle);
-	if (unlikely(target_vma == NULL))
+	target = eb_get_vma(eb, reloc->target_handle);
+	if (unlikely(!target))
 		return -ENOENT;
-	target_i915_obj = target_vma->obj;
-	target_obj = &target_vma->obj->base;
-
-	target_offset = gen8_canonical_addr(target_vma->node.start);
-
-	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
-	 * pipe_control writes because the gpu doesn't properly redirect them
-	 * through the ppgtt for non_secure batchbuffers. */
-	if (unlikely(IS_GEN6(dev_priv) &&
-	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
-		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
-				    PIN_GLOBAL);
-		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
-			return ret;
-	}

 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
 		DRM_DEBUG("reloc with multiple write domains: "
-			  "obj %p target %d offset %d "
+			  "target %d offset %d "
 			  "read %08x write %08x",
-			  obj, reloc->target_handle,
+			  reloc->target_handle,
 			  (int) reloc->offset,
 			  reloc->read_domains,
 			  reloc->write_domain);
@@ -592,75 +1365,103 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (unlikely((reloc->write_domain | reloc->read_domains)
 		     & ~I915_GEM_GPU_DOMAINS)) {
 		DRM_DEBUG("reloc with read/write non-GPU domains: "
-			  "obj %p target %d offset %d "
+			  "target %d offset %d "
 			  "read %08x write %08x",
-			  obj, reloc->target_handle,
+			  reloc->target_handle,
 			  (int) reloc->offset,
 			  reloc->read_domains,
 			  reloc->write_domain);
 		return -EINVAL;
 	}

-	target_obj->pending_read_domains |= reloc->read_domains;
-	target_obj->pending_write_domain |= reloc->write_domain;
+	if (reloc->write_domain) {
+		target->exec_entry->flags |= EXEC_OBJECT_WRITE;
+
+		/*
+		 * Sandybridge PPGTT errata: We need a global gtt mapping
+		 * for MI and pipe_control writes because the gpu doesn't
+		 * properly redirect them through the ppgtt for non_secure
+		 * batchbuffers.
+		 */
+		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+		    IS_GEN6(eb->i915)) {
+			err = i915_vma_bind(target, target->obj->cache_level,
+					    PIN_GLOBAL);
+			if (WARN_ONCE(err,
+				      "Unexpected failure to bind target VMA!"))
+				return err;
+		}
+	}

-	/* If the relocation already has the right value in it, no
+	/*
+	 * If the relocation already has the right value in it, no
 	 * more work needs to be done.
 	 */
-	if (target_offset == reloc->presumed_offset)
+	if (!DBG_FORCE_RELOC &&
+	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
 		return 0;

 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset >
-		     obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
+		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
-			  "obj %p target %d offset %d size %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  (int) obj->base.size);
+			  "target %d offset %d size %d.\n",
+			  reloc->target_handle,
+			  (int)reloc->offset,
+			  (int)vma->size);
 		return -EINVAL;
 	}
 	if (unlikely(reloc->offset & 3)) {
 		DRM_DEBUG("Relocation not 4-byte aligned: "
-			  "obj %p target %d offset %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset);
+			  "target %d offset %d.\n",
+			  reloc->target_handle,
+			  (int)reloc->offset);
 		return -EINVAL;
 	}

-	ret = relocate_entry(obj, reloc, cache, target_offset);
-	if (ret)
-		return ret;
+	/*
+	 * If we write into the object, we need to force the synchronisation
+	 * barrier, either with an asynchronous clflush or if we executed the
+	 * patching using the GPU (though that should be serialised by the
+	 * timeline). To be completely sure, and since we are required to
+	 * do relocations we are already stalling, disable the user's opt
+	 * out of our synchronisation.
+	 */
+	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;

 	/* and update the user's relocation entry */
-	reloc->presumed_offset = target_offset;
-	return 0;
+	return relocate_entry(vma, reloc, eb, target);
 }
 }

-i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
-				 struct eb_vmas *eb)
+static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
 {
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
-	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	struct reloc_cache cache;
-	int remain, ret = 0;
-
-	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
-	reloc_cache_init(&cache, eb->i915);
+	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
+	struct drm_i915_gem_relocation_entry __user *urelocs;
+	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	unsigned int remain;
 
 
+	urelocs = u64_to_user_ptr(entry->relocs_ptr);
 	remain = entry->relocation_count;
-	while (remain) {
-		struct drm_i915_gem_relocation_entry *r = stack_reloc;
-		unsigned long unwritten;
-		unsigned int count;
+	if (unlikely(remain > N_RELOC(ULONG_MAX)))
+		return -EINVAL;

-		count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
-		remain -= count;
+	/*
+	 * We must check that the entire relocation array is safe
+	 * to read. However, if the array is not writable the user loses
+	 * the updated relocation values.
+	 */
+	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
+		return -EFAULT;
+
+	do {
+		struct drm_i915_gem_relocation_entry *r = stack;
+		unsigned int count =
+			min_t(unsigned int, remain, ARRAY_SIZE(stack));
+		unsigned int copied;
 
 
-		/* This is the fast path and we cannot handle a pagefault
+		/*
+		 * This is the fast path and we cannot handle a pagefault
 		 * whilst holding the struct mutex lest the user pass in the
 		 * whilst holding the struct mutex lest the user pass in the
 		 * relocations contained within a mmaped bo. For in such a case
 		 * we, the page fault handler would call i915_gem_fault() and
 		 * this is bad and so lockdep complains vehemently.
 		 * this is bad and so lockdep complains vehemently.
 		 */
 		pagefault_disable();
+		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
 		pagefault_enable();
 		pagefault_enable();
-			ret = -EFAULT;
+		if (unlikely(copied)) {
+			remain = -EFAULT;
 			goto out;
 			goto out;
 		}

 		do {
 		do {
+			u64 offset = eb_relocate_entry(eb, vma, r);
 
 
-			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
-			if (ret)
+			if (likely(offset == 0)) {
+			} else if ((s64)offset < 0) {
+				remain = (int)offset;
 				goto out;
 				goto out;
-			if (r->presumed_offset != offset) {
-				pagefault_disable();
-				unwritten = __put_user(r->presumed_offset,
-						       &user_relocs->presumed_offset);
-				pagefault_enable();
-				if (unlikely(unwritten)) {
-					/* Note that reporting an error now
-					 * leaves everything in an inconsistent
-					 * state as we have *already* changed
-					 * the relocation value inside the
-					 * object. As we have not changed the
-					 * reloc.presumed_offset or will not
-					 * change the execobject.offset, on the
-					 * call we may not rewrite the value
-					 * inside the object, leaving it
-					 * dangling and causing a GPU hang.
-					 */
-					ret = -EFAULT;
-					goto out;
-				}
+			} else {
+				/*
+				 * Note that reporting an error now
+				 * leaves everything in an inconsistent
+				 * state as we have *already* changed
+				 * the relocation value inside the
+				 * object. As we have not changed the
+				 * reloc.presumed_offset or will not
+				 * change the execobject.offset, on the
+				 * call we may not rewrite the value
+				 * inside the object, leaving it
+				 * dangling and causing a GPU hang. Unless
+				 * userspace dynamically rebuilds the
+				 * relocations on each execbuf rather than
+				 * presume a static tree.
+				 *
+				 * We did previously check if the relocations
+				 * were writable (access_ok), an error now
+				 * would be a strange race with mprotect,
+				 * having already demonstrated that we
+				 * can read from this userspace address.
+				 */
+				offset = gen8_canonical_addr(offset & ~UPDATE);
+				__put_user(offset,
+					   &urelocs[r-stack].presumed_offset);
 			}
 			}
-			user_relocs++;
-			r++;
-		} while (--count);
-	}
-
+		} while (r++, --count);
+		urelocs += ARRAY_SIZE(stack);
+	} while (remain);
 out:
 out:
-	return ret;
-#undef N_RELOC
+	reloc_cache_reset(&eb->reloc_cache);
+	return remain;
 }
 }

 static int
-				      struct eb_vmas *eb,
-				      struct drm_i915_gem_relocation_entry *relocs)
+eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
 {
 {
 	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	int i, ret = 0;
+	struct drm_i915_gem_relocation_entry *relocs =
+		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
+	unsigned int i;
+	int err;
 
 
-	reloc_cache_init(&cache, eb->i915);
 	for (i = 0; i < entry->relocation_count; i++) {
 	for (i = 0; i < entry->relocation_count; i++) {
-		if (ret)
-			break;
-	}
-	reloc_cache_fini(&cache);
+		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
 
 
-	return ret;
+		if ((s64)offset < 0) {
+			err = (int)offset;
+			goto err;
+		}
+	}
+	err = 0;
+err:
+	reloc_cache_reset(&eb->reloc_cache);
+	return err;
 }
 }

-i915_gem_execbuffer_relocate(struct eb_vmas *eb)
+static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
 {
 {
-	int ret = 0;
+	const char __user *addr, *end;
+	unsigned long size;
+	char __maybe_unused c;
 
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
-		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
-		if (ret)
-			break;
-	}
+	size = entry->relocation_count;
+	if (size == 0)
+		return 0;
 
 
-	return ret;
-}
+	if (size > N_RELOC(ULONG_MAX))
+		return -EINVAL;
 
 
-static bool only_mappable_for_reloc(unsigned int flags)
-{
-	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
-		__EXEC_OBJECT_NEEDS_MAP;
+	addr = u64_to_user_ptr(entry->relocs_ptr);
+	size *= sizeof(struct drm_i915_gem_relocation_entry);
+	if (!access_ok(VERIFY_READ, addr, size))
+		return -EFAULT;
+
+	end = addr + size;
+	for (; addr < end; addr += PAGE_SIZE) {
+		int err = __get_user(c, addr);
+		if (err)
+			return err;
+	}
+	return __get_user(c, end - 1);
 }
 }

-i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-				struct intel_engine_cs *engine,
-				bool *need_reloc)
+static int eb_copy_relocations(const struct i915_execbuffer *eb)
 {
 {
-	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	uint64_t flags;
-	int ret;
+	const unsigned int count = eb->buffer_count;
+	unsigned int i;
+	int err;
 
 
-	flags = PIN_USER;
-	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
-		flags |= PIN_GLOBAL;
+	for (i = 0; i < count; i++) {
+		const unsigned int nreloc = eb->exec[i].relocation_count;
+		struct drm_i915_gem_relocation_entry __user *urelocs;
+		struct drm_i915_gem_relocation_entry *relocs;
+		unsigned long size;
+		unsigned long copied;
 
 
-	if (!drm_mm_node_allocated(&vma->node)) {
-		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
-		 * limit address to the first 4GBs for unflagged objects.
-		 */
-		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
-			flags |= PIN_ZONE_4G;
-		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
-			flags |= PIN_GLOBAL | PIN_MAPPABLE;
-		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
-			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
-		if (entry->flags & EXEC_OBJECT_PINNED)
-			flags |= entry->offset | PIN_OFFSET_FIXED;
-		if ((flags & PIN_MAPPABLE) == 0)
-			flags |= PIN_HIGH;
-	}
-
-	ret = i915_vma_pin(vma,
-			   entry->pad_to_size,
-			   entry->alignment,
-			   flags);
-	if ((ret == -ENOSPC || ret == -E2BIG) &&
-	    only_mappable_for_reloc(entry->flags))
-		ret = i915_vma_pin(vma,
-				   entry->pad_to_size,
-				   entry->alignment,
-				   flags & ~PIN_MAPPABLE);
-	if (ret)
-		return ret;
+		if (nreloc == 0)
+			continue;

-	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+		err = check_relocations(&eb->exec[i]);
+		if (err)
+			goto err;

-	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-		ret = i915_vma_get_fence(vma);
-		if (ret)
-			return ret;
+		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
+		size = nreloc * sizeof(*relocs);

-		if (i915_vma_pin_fence(vma))
-			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
-	}
+		relocs = kvmalloc_array(size, 1, GFP_TEMPORARY);
+		if (!relocs) {
+			kvfree(relocs);
+			err = -ENOMEM;
+			goto err;
+		}

-	if (entry->offset != vma->node.start) {
-		entry->offset = vma->node.start;
-		*need_reloc = true;
-	}
+		/* copy_from_user is limited to < 4GiB */
+		copied = 0;
+		do {
+			unsigned int len =
+				min_t(u64, BIT_ULL(31), size - copied);
+
+			if (__copy_from_user((char *)relocs + copied,
+					     (char *)urelocs + copied,
+					     len)) {
+				kvfree(relocs);
+				err = -EFAULT;
+				goto err;
+			}
+
+			copied += len;
+		} while (copied < size);
+
+		/*
+		 * As we do not update the known relocation offsets after
+		 * relocating (due to the complexities in lock handling),
+		 * we need to mark them as invalid now so that we force the
+		 * relocation processing next time. Just in case the target
+		 * object is evicted and then rebound into its old
+		 * presumed_offset before the next execbuffer - if that
+		 * happened we would make the mistake of assuming that the
+		 * relocations were valid.
+		 */
+		user_access_begin();
+		for (copied = 0; copied < nreloc; copied++)
+			unsafe_put_user(-1,
+					&urelocs[copied].presumed_offset,
+					end_user);
+end_user:
+		user_access_end();
 
 
-	if (entry->flags & EXEC_OBJECT_WRITE) {
-		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
-		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
 	}

 	return 0;
+
+err:
+	while (i--) {
+		struct drm_i915_gem_relocation_entry *relocs =
+			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
+		if (eb->exec[i].relocation_count)
+			kvfree(relocs);
+	}
+	return err;
 }

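A userspace analogue of the chunked copy in eb_copy_relocations() above (memcpy stands in for copy_from_user, which cannot take a full 4GiB length in one call):

#include <stdint.h>
#include <string.h>

static void copy_chunked(char *dst, const char *src, uint64_t size)
{
	uint64_t copied = 0;

	while (copied < size) {
		uint64_t len = size - copied;

		if (len > (1ull << 31))
			len = 1ull << 31; /* BIT_ULL(31) in the patch */
		memcpy(dst + copied, src + copied, len);
		copied += len;
	}
}

int main(void)
{
	char dst[8], src[8] = "chunked";

	copy_chunked(dst, src, sizeof(src));
	return dst[0] == 'c' ? 0 : 1;
}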
-static bool
-need_reloc_mappable(struct i915_vma *vma)
+static int eb_prefault_relocations(const struct i915_execbuffer *eb)
 {
-	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-
-	if (entry->relocation_count == 0)
-		return false;
+	const unsigned int count = eb->buffer_count;
+	unsigned int i;

-	if (!i915_vma_is_ggtt(vma))
-		return false;
+	if (unlikely(i915.prefault_disable))
+		return 0;

-	/* See also use_cpu_reloc() */
-	if (HAS_LLC(to_i915(vma->obj->base.dev)))
-		return false;
+	for (i = 0; i < count; i++) {
+		int err;

-	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
-		return false;
+		err = check_relocations(&eb->exec[i]);
+		if (err)
+			return err;
+	}

-	return true;
+	return 0;
 }

-static bool
-eb_vma_misplaced(struct i915_vma *vma)
+static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
 {
-	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-
-	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
-		!i915_vma_is_ggtt(vma));
-
-	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
-		return true;
-
-	if (vma->node.size < entry->pad_to_size)
-		return true;
-
-	if (entry->flags & EXEC_OBJECT_PINNED &&
-	    vma->node.start != entry->offset)
-		return true;
-
-	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
-	    vma->node.start < BATCH_OFFSET_BIAS)
-		return true;
-
-	/* avoid costly ping-pong once a batch bo ended up non-mappable */
-	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
-	    !i915_vma_is_map_and_fenceable(vma))
-		return !only_mappable_for_reloc(entry->flags);
+	struct drm_device *dev = &eb->i915->drm;
+	bool have_copy = false;
+	struct i915_vma *vma;
+	int err = 0;

-	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
-	    (vma->node.start + vma->node.size - 1) >> 32)
-		return true;
+repeat:
+	if (signal_pending(current)) {
+		err = -ERESTARTSYS;
+		goto out;
+	}
 
-	return false;
-}
+	/* We may process another execbuffer during the unlock... */
+	eb_reset_vmas(eb);
+	mutex_unlock(&dev->struct_mutex);
 
-static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
-			    struct list_head *vmas,
-			    struct i915_gem_context *ctx,
-			    bool *need_relocs)
-{
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	struct i915_address_space *vm;
-	struct list_head ordered_vmas;
-	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
-	bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
-	int retry;
-
-	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
-
-	INIT_LIST_HEAD(&ordered_vmas);
-	INIT_LIST_HEAD(&pinned_vmas);
-	while (!list_empty(vmas)) {
-		struct drm_i915_gem_exec_object2 *entry;
-		bool need_fence, need_mappable;
-
-		vma = list_first_entry(vmas, struct i915_vma, exec_list);
-		obj = vma->obj;
-		entry = vma->exec_entry;
-
-		if (ctx->flags & CONTEXT_NO_ZEROMAP)
-			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
-		if (!has_fenced_gpu_access)
-			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
-		need_fence =
-			(entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
-			 needs_unfenced_map) &&
-			i915_gem_object_is_tiled(obj);
-		need_mappable = need_fence || need_reloc_mappable(vma);
-
-		if (entry->flags & EXEC_OBJECT_PINNED)
-			list_move_tail(&vma->exec_list, &pinned_vmas);
-		else if (need_mappable) {
-			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
-			list_move(&vma->exec_list, &ordered_vmas);
-		} else
-			list_move_tail(&vma->exec_list, &ordered_vmas);
-
-		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
-		obj->base.pending_write_domain = 0;
-	}
-	list_splice(&ordered_vmas, vmas);
-	list_splice(&pinned_vmas, vmas);
-
-	/* Attempt to pin all of the buffers into the GTT.
-	 * This is done in 3 phases:
+	/*
+	 * We take 3 passes through the slowpath.
 	 *
-	 * 1a. Unbind all objects that do not match the GTT constraints for
-	 *     the execbuffer (fenceable, mappable, alignment etc).
-	 * 1b. Increment pin count for already bound objects.
-	 * 2.  Bind new objects.
-	 * 3.  Decrement pin count.
+	 * 1 - we try to just prefault all the user relocation entries and
+	 * then attempt to reuse the atomic pagefault disabled fast path again.
 	 *
-	 * This avoids unnecessary unbinding of later objects in order to make
-	 * room for the earlier objects *unless* we need to defragment.
+	 * 2 - we copy the user entries to a local buffer here outside of the
+	 * lock and allow ourselves to wait upon any rendering before
+	 * applying the relocations
+	 *
+	 * 3 - we already have a local copy of the relocation entries, but
+	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
 	 */
-	retry = 0;
-	do {
-		int ret = 0;
-
-		/* Unbind any ill-fitting objects or pin. */
-		list_for_each_entry(vma, vmas, exec_list) {
-			if (!drm_mm_node_allocated(&vma->node))
-				continue;
-
-			if (eb_vma_misplaced(vma))
-				ret = i915_vma_unbind(vma);
-			else
-				ret = i915_gem_execbuffer_reserve_vma(vma,
-								      engine,
-								      need_relocs);
-			if (ret)
-				goto err;
-		}
-
-		/* Bind fresh objects */
-		list_for_each_entry(vma, vmas, exec_list) {
-			if (drm_mm_node_allocated(&vma->node))
-				continue;
-
-			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
-							      need_relocs);
-			if (ret)
-				goto err;
-		}
-
-err:
-		if (ret != -ENOSPC || retry++)
-			return ret;
-
-		/* Decrement pin count for bound objects */
-		list_for_each_entry(vma, vmas, exec_list)
-			i915_gem_execbuffer_unreserve_vma(vma);
+	if (!err) {
+		err = eb_prefault_relocations(eb);
+	} else if (!have_copy) {
+		err = eb_copy_relocations(eb);
+		have_copy = err == 0;
+	} else {
+		cond_resched();
+		err = 0;
+	}
+	if (err) {
+		mutex_lock(&dev->struct_mutex);
+		goto out;
+	}
 
-		ret = i915_gem_evict_vm(vm, true);
-		if (ret)
-			return ret;
-	} while (1);
-}
+	/* A frequent cause for EAGAIN is currently unavailable client pages */
+	flush_workqueue(eb->i915->mm.userptr_wq);
 
-static int
-i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
-				  struct drm_i915_gem_execbuffer2 *args,
-				  struct drm_file *file,
-				  struct intel_engine_cs *engine,
-				  struct eb_vmas *eb,
-				  struct drm_i915_gem_exec_object2 *exec,
-				  struct i915_gem_context *ctx)
-{
-	struct drm_i915_gem_relocation_entry *reloc;
-	struct i915_address_space *vm;
-	struct i915_vma *vma;
-	bool need_relocs;
-	int *reloc_offset;
-	int i, total, ret;
-	unsigned count = args->buffer_count;
+	err = i915_mutex_lock_interruptible(dev);
+	if (err) {
+		mutex_lock(&dev->struct_mutex);
+		goto out;
+	}
 
-	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
+	/* reacquire the objects */
+	err = eb_lookup_vmas(eb);
+	if (err)
+		goto err;
 
-	/* We may process another execbuffer during the unlock... */
-	while (!list_empty(&eb->vmas)) {
-		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
-		list_del_init(&vma->exec_list);
-		i915_gem_execbuffer_unreserve_vma(vma);
-		i915_vma_put(vma);
+	list_for_each_entry(vma, &eb->relocs, reloc_link) {
+		if (!have_copy) {
+			pagefault_disable();
+			err = eb_relocate_vma(eb, vma);
+			pagefault_enable();
+			if (err)
+				goto repeat;
+		} else {
+			err = eb_relocate_vma_slow(eb, vma);
+			if (err)
+				goto err;
+		}
 	}
 
-	mutex_unlock(&dev->struct_mutex);
+	/*
+	 * Leave the user relocations as are, this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
 
-	total = 0;
-	for (i = 0; i < count; i++)
-		total += exec[i].relocation_count;
+err:
+	if (err == -EAGAIN)
+		goto repeat;
 
-	reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
-	reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
-	if (reloc == NULL || reloc_offset == NULL) {
-		kvfree(reloc);
-		kvfree(reloc_offset);
-		mutex_lock(&dev->struct_mutex);
-		return -ENOMEM;
-	}
+out:
+	if (have_copy) {
+		const unsigned int count = eb->buffer_count;
+		unsigned int i;
 
-	total = 0;
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_relocation_entry __user *user_relocs;
-		u64 invalid_offset = (u64)-1;
-		int j;
+		for (i = 0; i < count; i++) {
+			const struct drm_i915_gem_exec_object2 *entry =
+				&eb->exec[i];
+			struct drm_i915_gem_relocation_entry *relocs;
 
-		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
+			if (!entry->relocation_count)
+				continue;
 
-		if (copy_from_user(reloc+total, user_relocs,
-				   exec[i].relocation_count * sizeof(*reloc))) {
-			ret = -EFAULT;
-			mutex_lock(&dev->struct_mutex);
-			goto err;
+			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
+			kvfree(relocs);
 		}
+	}
 
-		/* As we do not update the known relocation offsets after
-		 * relocating (due to the complexities in lock handling),
-		 * we need to mark them as invalid now so that we force the
-		 * relocation processing next time. Just in case the target
-		 * object is evicted and then rebound into its old
-		 * presumed_offset before the next execbuffer - if that
-		 * happened we would make the mistake of assuming that the
-		 * relocations were valid.
-		 */
-		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (__copy_to_user(&user_relocs[j].presumed_offset,
-					   &invalid_offset,
-					   sizeof(invalid_offset))) {
-				ret = -EFAULT;
-				mutex_lock(&dev->struct_mutex);
-				goto err;
-			}
-		}
+	return err ?: have_copy;
+}
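Note the tri-state return from eb_relocate_slow(): negative on error, 0 when the relocations were applied in place, and 1 (have_copy) when they were applied from a local snapshot whose user copies have already been invalidated. A minimal sketch of the three-pass fallback, with hypothetical stub helpers standing in for the real prefault/copy/apply steps:

#include <errno.h>
#include <stdbool.h>

/* Stubs for the real prefault/copy/apply/resched steps. */
static int prefault_user_relocs(void) { return 0; }
static int copy_user_relocs(void) { return 0; }
static int apply_relocs(bool from_copy) { (void)from_copy; return 0; }
static void yield_cpu(void) { }

static int relocate_with_fallbacks(void)
{
	bool have_copy = false;
	int err = 0;

repeat:
	if (!err) {
		err = prefault_user_relocs();	/* pass 1: prefault, retry fast path */
	} else if (!have_copy) {
		err = copy_user_relocs();	/* pass 2: snapshot the user entries */
		have_copy = (err == 0);
	} else {
		yield_cpu();			/* pass 3: we were interrupted, retry */
		err = 0;
	}
	if (err)
		return err;

	err = apply_relocs(have_copy);
	if (err == -EAGAIN)	/* interrupted whilst waiting; take another pass */
		goto repeat;

	return err ? err : have_copy;	/* 1 means "relocated from a local copy" */
}
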
 
-		reloc_offset[i] = total;
-		total += exec[i].relocation_count;
-	}
+static int eb_relocate(struct i915_execbuffer *eb)
+{
+	if (eb_lookup_vmas(eb))
+		goto slow;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret) {
-		mutex_lock(&dev->struct_mutex);
-		goto err;
+	/* The objects are in their final locations, apply the relocations. */
+	if (eb->args->flags & __EXEC_HAS_RELOC) {
+		struct i915_vma *vma;
+
+		list_for_each_entry(vma, &eb->relocs, reloc_link) {
+			if (eb_relocate_vma(eb, vma))
+				goto slow;
+		}
 	}
 
-	/* reacquire the objects */
-	eb_reset(eb);
-	ret = eb_lookup_vmas(eb, exec, args, vm, file);
-	if (ret)
-		goto err;
+	return 0;
 
-	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
-					  &need_relocs);
-	if (ret)
-		goto err;
+slow:
+	return eb_relocate_slow(eb);
+}
 
-	list_for_each_entry(vma, &eb->vmas, exec_list) {
-		int offset = vma->exec_entry - exec;
-		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
-							    reloc + reloc_offset[offset]);
-		if (ret)
-			goto err;
-	}
+static void eb_export_fence(struct i915_vma *vma,
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
+{
+	struct reservation_object *resv = vma->resv;
 
-	/* Leave the user relocations as are, this is the painfully slow path,
-	 * and we want to avoid the complication of dropping the lock whilst
-	 * having buffers reserved in the aperture and so causing spurious
-	 * ENOSPC for random operations.
+	/*
+	 * Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
 	 */
-
-err:
-	kvfree(reloc);
-	kvfree(reloc_offset);
-	return ret;
+	reservation_object_lock(resv, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &req->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &req->fence);
+	reservation_object_unlock(resv);
 }
 
-static int
-i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
-				struct list_head *vmas)
+static int eb_move_to_gpu(struct i915_execbuffer *eb)
 {
-	struct i915_vma *vma;
-	int ret;
+	const unsigned int count = eb->buffer_count;
+	unsigned int i;
+	int err;
 
-	list_for_each_entry(vma, vmas, exec_list) {
+	for (i = 0; i < count; i++) {
+		const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+		struct i915_vma *vma = exec_to_vma(entry);
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
+		if (entry->flags & EXEC_OBJECT_CAPTURE) {
			struct i915_gem_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;
 
-			capture->next = req->capture_list;
+			capture->next = eb->request->capture_list;
 			capture->vma = vma;
-			req->capture_list = capture;
+			eb->request->capture_list = capture;
 		}
 
-		if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
-			continue;
+		if (entry->flags & EXEC_OBJECT_ASYNC)
+			goto skip_flushes;
 
-		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) {
+		if (unlikely(obj->cache_dirty && !obj->cache_coherent))
 			i915_gem_clflush_object(obj, 0);
-			obj->base.write_domain = 0;
-		}
 
-		ret = i915_gem_request_await_object
-			(req, obj, obj->base.pending_write_domain);
-		if (ret)
-			return ret;
+		err = i915_gem_request_await_object
+			(eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
+		if (err)
+			return err;
+
+skip_flushes:
+		i915_vma_move_to_active(vma, eb->request, entry->flags);
+		__eb_unreserve_vma(vma, entry);
+		vma->exec_entry = NULL;
+	}
+
+	for (i = 0; i < count; i++) {
+		const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+		struct i915_vma *vma = exec_to_vma(entry);
+
+		eb_export_fence(vma, eb->request, entry->flags);
+		if (unlikely(entry->flags & __EXEC_OBJECT_HAS_REF))
+			i915_vma_put(vma);
 	}
+	eb->exec = NULL;
 
 	/* Unconditionally flush any chipset caches (for streaming writes). */
-	i915_gem_chipset_flush(req->engine->i915);
+	i915_gem_chipset_flush(eb->i915);
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	return req->engine->emit_flush(req, EMIT_INVALIDATE);
+	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
 }
 
-static bool
-i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 {
-	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
+	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
 		return false;
 
 	/* Kernel clipping was a DRI1 misfeature */
@@ -1170,107 +1890,6 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
	return true;
}
 
-static int
-validate_exec_list(struct drm_device *dev,
-		   struct drm_i915_gem_exec_object2 *exec,
-		   int count)
-{
-	unsigned relocs_total = 0;
-	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
-	unsigned invalid_flags;
-	int i;
-
-	/* INTERNAL flags must not overlap with external ones */
-	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
-
-	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
-	if (USES_FULL_PPGTT(dev))
-		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
-
-	for (i = 0; i < count; i++) {
-		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
-		int length; /* limited by fault_in_pages_readable() */
-
-		if (exec[i].flags & invalid_flags)
-			return -EINVAL;
-
-		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
-		 * any non-page-aligned or non-canonical addresses.
-		 */
-		if (exec[i].flags & EXEC_OBJECT_PINNED) {
-			if (exec[i].offset !=
-			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
-				return -EINVAL;
-		}
-
-		/* From drm_mm perspective address space is continuous,
-		 * so from this point we're always using non-canonical
-		 * form internally.
-		 */
-		exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
-
-		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
-			return -EINVAL;
-
-		/* pad_to_size was once a reserved field, so sanitize it */
-		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
-			if (offset_in_page(exec[i].pad_to_size))
-				return -EINVAL;
-		} else {
-			exec[i].pad_to_size = 0;
-		}
-
-		/* First check for malicious input causing overflow in
-		 * the worst case where we need to allocate the entire
-		 * relocation tree as a single array.
-		 */
-		if (exec[i].relocation_count > relocs_max - relocs_total)
-			return -EINVAL;
-		relocs_total += exec[i].relocation_count;
-
-		length = exec[i].relocation_count *
-			sizeof(struct drm_i915_gem_relocation_entry);
-		/*
-		 * We must check that the entire relocation array is safe
-		 * to read, but since we may need to update the presumed
-		 * offsets during execution, check for full write access.
-		 */
-		if (!access_ok(VERIFY_WRITE, ptr, length))
-			return -EFAULT;
-
-		if (likely(!i915.prefault_disable)) {
-			if (fault_in_pages_readable(ptr, length))
-				return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static struct i915_gem_context *
-i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-			  struct intel_engine_cs *engine, const u32 ctx_id)
-{
-	struct i915_gem_context *ctx;
-
-	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
-	if (IS_ERR(ctx))
-		return ctx;
-
-	if (i915_gem_context_is_banned(ctx)) {
-		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
-		return ERR_PTR(-EIO);
-	}
-
-	return ctx;
-}
-
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
-	return !(obj->cache_level == I915_CACHE_NONE ||
-		 obj->cache_level == I915_CACHE_WT);
-}
-
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
@@ -1281,7 +1900,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	/* Add a reference if we're newly entering the active list.
+	/*
+	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
@@ -1294,61 +1914,22 @@ void i915_vma_move_to_active(struct i915_vma *vma,
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 
+	obj->base.write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
+		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
+
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);
 
-		/* update for the implicit flush after a batch */
-		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-		if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
-			obj->cache_dirty = true;
+		obj->base.read_domains = 0;
 	}
+	obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
 
	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}
 
-static void eb_export_fence(struct drm_i915_gem_object *obj,
-			    struct drm_i915_gem_request *req,
-			    unsigned int flags)
-{
-	struct reservation_object *resv = obj->resv;
-
-	/* Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	reservation_object_lock(resv, NULL);
-	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &req->fence);
-	else if (reservation_object_reserve_shared(resv) == 0)
-		reservation_object_add_shared_fence(resv, &req->fence);
-	reservation_object_unlock(resv);
-}
-
-static void
-i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-				   struct drm_i915_gem_request *req)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		obj->base.write_domain = obj->base.pending_write_domain;
-		if (obj->base.write_domain)
-			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
-		else
-			obj->base.pending_read_domains |= obj->base.read_domains;
-		obj->base.read_domains = obj->base.pending_read_domains;
-
-		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
-		eb_export_fence(obj, req, vma->exec_entry->flags);
-	}
-}
-
-static int
-i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
+static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;
@@ -1358,50 +1939,43 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
		return -EINVAL;
	}
 
-	cs = intel_ring_begin(req, 4 * 3);
+	cs = intel_ring_begin(req, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
 
+	*cs++ = MI_LOAD_REGISTER_IMM(4);
 	for (i = 0; i < 4; i++) {
-		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
-
+	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
 
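The ring allocation above shrinks from 4 * 3 to 4 * 2 + 2 dwords because the four separate MI_LOAD_REGISTER_IMM(1) packets (header + register offset + value each) are folded into a single MI_LOAD_REGISTER_IMM(4) header followed by four (offset, value) pairs, padded with an MI_NOOP. Assuming that packet layout, the dword budgets work out as in this sketch:

enum {
	OLD_SOL_DWORDS = 4 * 3,		/* 4 x (LRI(1) header + offset + value) = 12 */
	NEW_SOL_DWORDS = 1 + 4 * 2 + 1,	/* LRI(4) header + 4 x (offset, value) + NOOP = 10 */
};
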
-static struct i915_vma *
-i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
-			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
-			  struct drm_i915_gem_object *batch_obj,
-			  struct eb_vmas *eb,
-			  u32 batch_start_offset,
-			  u32 batch_len,
-			  bool is_master)
+static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
-	int ret;
+	int err;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
-						   PAGE_ALIGN(batch_len));
+	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
+						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);
 
-	ret = intel_engine_cmd_parser(engine,
-				      batch_obj,
+	err = intel_engine_cmd_parser(eb->engine,
+				      eb->batch->obj,
 				      shadow_batch_obj,
-				      batch_start_offset,
-				      batch_len,
+				      eb->batch_start_offset,
+				      eb->batch_len,
 				      is_master);
-	if (ret) {
-		if (ret == -EACCES) /* unhandled chained batch */
+	if (err) {
+		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
-			vma = ERR_PTR(ret);
+			vma = ERR_PTR(err);
		goto out;
	}
 
@@ -1409,12 +1983,11 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
	if (IS_ERR(vma))
		goto out;
 
-	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
-
-	vma->exec_entry = shadow_exec_entry;
-	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
-	i915_gem_object_get(shadow_batch_obj);
-	list_add_tail(&vma->exec_list, &eb->vmas);
+	vma->exec_entry =
+		memset(&eb->exec[eb->buffer_count++],
+		       0, sizeof(*vma->exec_entry));
+	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
+	__exec_to_vma(vma->exec_entry) = (uintptr_t)i915_vma_get(vma);
 
out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
@@ -1422,54 +1995,37 @@ out:
}

static void
-add_to_client(struct drm_i915_gem_request *req,
-	      struct drm_file *file)
+add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}
 
-static int
-execbuf_submit(struct i915_execbuffer_params *params,
-	       struct drm_i915_gem_execbuffer2 *args,
-	       struct list_head *vmas)
+static int eb_submit(struct i915_execbuffer *eb)
 {
-	u64 exec_start, exec_len;
-	int ret;
-
-	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
-	if (ret)
-		return ret;
+	int err;
 
-	ret = i915_switch_context(params->request);
-	if (ret)
-		return ret;
+	err = eb_move_to_gpu(eb);
+	if (err)
+		return err;
 
-	if (args->flags & I915_EXEC_CONSTANTS_MASK) {
-		DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
-		return -EINVAL;
-	}
+	err = i915_switch_context(eb->request);
+	if (err)
+		return err;
 
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(params->request);
-		if (ret)
-			return ret;
+	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
+		err = i915_reset_gen7_sol_offsets(eb->request);
+		if (err)
+			return err;
 	}
 
-	exec_len   = args->batch_len;
-	exec_start = params->batch->node.start +
-		     params->args_batch_start_offset;
-
-	if (exec_len == 0)
-		exec_len = params->batch->size - params->args_batch_start_offset;
-
-	ret = params->engine->emit_bb_start(params->request,
-					    exec_start, exec_len,
-					    params->dispatch_flags);
-	if (ret)
-		return ret;
-
-	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	err = eb->engine->emit_bb_start(eb->request,
+					eb->batch->node.start +
+					eb->batch_start_offset,
+					eb->batch_len,
+					eb->batch_flags);
+	if (err)
+		return err;
 
	return 0;
}
@@ -1551,66 +2107,62 @@ eb_select_engine(struct drm_i915_private *dev_priv,
}

static int
-i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct eb_vmas *eb;
-	struct drm_i915_gem_exec_object2 shadow_exec_entry;
-	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
-	struct i915_address_space *vm;
-	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
-	struct i915_execbuffer_params *params = &params_master;
-	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u32 dispatch_flags;
+	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
-	bool need_relocs;
-
-	if (!i915_gem_check_execbuffer(args))
-		return -EINVAL;
-
-	ret = validate_exec_list(dev, exec, args->buffer_count);
-	if (ret)
-		return ret;
-
-	dispatch_flags = 0;
+	int err;
+
+	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
+		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
+
+	eb.i915 = to_i915(dev);
+	eb.file = file;
+	eb.args = args;
+	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
+		args->flags |= __EXEC_HAS_RELOC;
+	eb.exec = exec;
+	eb.ctx = NULL;
+	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
+	if (USES_FULL_PPGTT(eb.i915))
+		eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
+	reloc_cache_init(&eb.reloc_cache, eb.i915);
+
+	eb.buffer_count = args->buffer_count;
+	eb.batch_start_offset = args->batch_start_offset;
+	eb.batch_len = args->batch_len;
+
+	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
		    return -EPERM;
 
-		dispatch_flags |= I915_DISPATCH_SECURE;
+		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
-		dispatch_flags |= I915_DISPATCH_PINNED;
-
-	engine = eb_select_engine(dev_priv, file, args);
-	if (!engine)
-		return -EINVAL;
+		eb.batch_flags |= I915_DISPATCH_PINNED;
 
-	if (args->buffer_count < 1) {
-		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+	eb.engine = eb_select_engine(eb.i915, file, args);
+	if (!eb.engine)
 		return -EINVAL;
-	}
 
 	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
-		if (!HAS_RESOURCE_STREAMER(dev_priv)) {
+		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
-		if (engine->id != RCS) {
+		if (eb.engine->id != RCS) {
 			DRM_DEBUG("RS is not available on %s\n",
-				 engine->name);
+				 eb.engine->name);
			return -EINVAL;
		}
 
-		dispatch_flags |= I915_DISPATCH_RS;
+		eb.batch_flags |= I915_DISPATCH_RS;
 	}
 
 	if (args->flags & I915_EXEC_FENCE_IN) {
@@ -1622,102 +2174,62 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
-			ret = out_fence_fd;
+			err = out_fence_fd;
			goto err_in_fence;
		}
	}
 
-	/* Take a local wakeref for preparing to dispatch the execbuf as
+	if (eb_create(&eb))
+		return -ENOMEM;
+
+	/*
+	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
-	intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(eb.i915);
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
+		goto err_rpm;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto pre_mutex_err;
+	err = eb_select_context(&eb);
+	if (unlikely(err))
+		goto err_unlock;
 
-	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
-	if (IS_ERR(ctx)) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = PTR_ERR(ctx);
-		goto pre_mutex_err;
-	}
-
-	i915_gem_context_get(ctx);
-
-	if (ctx->ppgtt)
-		vm = &ctx->ppgtt->base;
-	else
-		vm = &ggtt->base;
-
-	memset(&params_master, 0x00, sizeof(params_master));
-
-	eb = eb_create(dev_priv, args);
-	if (eb == NULL) {
-		i915_gem_context_put(ctx);
-		mutex_unlock(&dev->struct_mutex);
-		ret = -ENOMEM;
-		goto pre_mutex_err;
-	}
-
-	/* Look up object handles */
-	ret = eb_lookup_vmas(eb, exec, args, vm, file);
-	if (ret)
-		goto err;
-
-	/* take note of the batch buffer before we might reorder the lists */
-	params->batch = eb_get_batch(eb);
-
-	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
-					  &need_relocs);
-	if (ret)
-		goto err;
-
-	/* The objects are in their final locations, apply the relocations. */
-	if (need_relocs)
-		ret = i915_gem_execbuffer_relocate(eb);
-	if (ret) {
-		if (ret == -EFAULT) {
-			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
-								engine,
-								eb, exec, ctx);
-			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-		}
-		if (ret)
-			goto err;
-	}
+	err = eb_relocate(&eb);
+	if (err)
+		/*
+		 * If the user expects the execobject.offset and
+		 * reloc.presumed_offset to be an exact match,
+		 * as for using NO_RELOC, then we cannot update
+		 * the execobject.offset until we have completed
+		 * relocation.
+		 */
+		args->flags &= ~__EXEC_HAS_RELOC;
+	if (err < 0)
+		goto err_vma;
 
-	/* Set the pending read domains for the batch buffer to COMMAND */
-	if (params->batch->obj->base.pending_write_domain) {
+	if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
-		ret = -EINVAL;
-		goto err;
+		err = -EINVAL;
+		goto err_vma;
 	}
-	if (args->batch_start_offset > params->batch->size ||
-	    args->batch_len > params->batch->size - args->batch_start_offset) {
+	if (eb.batch_start_offset > eb.batch->size ||
+	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
 		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
-		ret = -EINVAL;
-		goto err;
+		err = -EINVAL;
+		goto err_vma;
 	}
 
-	params->args_batch_start_offset = args->batch_start_offset;
-	if (engine->needs_cmd_parser && args->batch_len) {
+	if (eb.engine->needs_cmd_parser && eb.batch_len) {
 		struct i915_vma *vma;
 
-		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
-						params->batch->obj,
-						eb,
-						args->batch_start_offset,
-						args->batch_len,
-						drm_is_current_master(file));
+		vma = eb_parse(&eb, drm_is_current_master(file));
 		if (IS_ERR(vma)) {
-			ret = PTR_ERR(vma);
-			goto err;
+			err = PTR_ERR(vma);
+			goto err_vma;
 		}
 
 		if (vma) {
@@ -1730,19 +2242,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
-			dispatch_flags |= I915_DISPATCH_SECURE;
-			params->args_batch_start_offset = 0;
-			params->batch = vma;
+			eb.batch_flags |= I915_DISPATCH_SECURE;
+			eb.batch_start_offset = 0;
+			eb.batch = vma;
		}
	}
 
-	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+	if (eb.batch_len == 0)
+		eb.batch_len = eb.batch->size - eb.batch_start_offset;
 
-	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+	/*
+	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
-	if (dispatch_flags & I915_DISPATCH_SECURE) {
-		struct drm_i915_gem_object *obj = params->batch->obj;
+	if (eb.batch_flags & I915_DISPATCH_SECURE) {
 		struct i915_vma *vma;
 
 		/*
@@ -1755,66 +2268,56 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
 		if (IS_ERR(vma)) {
-			ret = PTR_ERR(vma);
-			goto err;
+			err = PTR_ERR(vma);
+			goto err_vma;
 		}
 
-		params->batch = vma;
+		eb.batch = vma;
 	}
 
+	/* All GPU relocation batches must be submitted prior to the user rq */
+	GEM_BUG_ON(eb.reloc_cache.rq);
+
 	/* Allocate a request for this batch buffer nice and early. */
-	params->request = i915_gem_request_alloc(engine, ctx);
-	if (IS_ERR(params->request)) {
-		ret = PTR_ERR(params->request);
+	eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
+	if (IS_ERR(eb.request)) {
+		err = PTR_ERR(eb.request);
 		goto err_batch_unpin;
 	}
 
 	if (in_fence) {
-		ret = i915_gem_request_await_dma_fence(params->request,
-						       in_fence);
-		if (ret < 0)
+		err = i915_gem_request_await_dma_fence(eb.request, in_fence);
+		if (err < 0)
 			goto err_request;
 	}
 
 	if (out_fence_fd != -1) {
-		out_fence = sync_file_create(&params->request->fence);
+		out_fence = sync_file_create(&eb.request->fence);
 		if (!out_fence) {
-			ret = -ENOMEM;
+			err = -ENOMEM;
 			goto err_request;
 		}
 	}
 
-	/* Whilst this request exists, batch_obj will be on the
+	/*
+	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
-	params->request->batch = params->batch;
-
-	/*
-	 * Save assorted stuff away to pass through to *_submission().
-	 * NB: This data should be 'persistent' and not local as it will
-	 * kept around beyond the duration of the IOCTL once the GPU
-	 * scheduler arrives.
-	 */
-	params->dev                     = dev;
-	params->file                    = file;
-	params->engine                    = engine;
-	params->dispatch_flags          = dispatch_flags;
-	params->ctx                     = ctx;
+	eb.request->batch = eb.batch;
 
-	trace_i915_gem_request_queue(params->request, dispatch_flags);
-
-	ret = execbuf_submit(params, args, &eb->vmas);
+	trace_i915_gem_request_queue(eb.request, eb.batch_flags);
+	err = eb_submit(&eb);
 err_request:
-	__i915_add_request(params->request, ret == 0);
-	add_to_client(params->request, file);
+	__i915_add_request(eb.request, err == 0);
+	add_to_client(eb.request, file);
 
 	if (out_fence) {
-		if (ret == 0) {
+		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
@@ -1825,30 +2328,22 @@ err_request:
	}

err_batch_unpin:
-	/*
-	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
-	 * batch vma for correctness. For less ugly and less fragility this
-	 * needs to be adjusted to also track the ggtt batch vma properly as
-	 * active.
-	 */
-	if (dispatch_flags & I915_DISPATCH_SECURE)
-		i915_vma_unpin(params->batch);
-err:
-	/* the request owns the ref now */
-	i915_gem_context_put(ctx);
-	eb_destroy(eb);
-
+	if (eb.batch_flags & I915_DISPATCH_SECURE)
+		i915_vma_unpin(eb.batch);
+err_vma:
+	if (eb.exec)
+		eb_release_vmas(&eb);
+	i915_gem_context_put(eb.ctx);
+err_unlock:
 	mutex_unlock(&dev->struct_mutex);
-
-pre_mutex_err:
-	/* intel_gpu_busy should also get a ref, so it will free when the device
-	 * is really idle. */
-	intel_runtime_pm_put(dev_priv);
+err_rpm:
+	intel_runtime_pm_put(eb.i915);
+	eb_destroy(&eb);
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
-	return ret;
+	return err;
}

/*
@@ -1859,20 +2354,38 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
+	const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
-	int ret, i;
+	unsigned int i;
+	int err;
 
-	if (args->buffer_count < 1) {
-		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+	if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
+		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
 
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = I915_EXEC_RENDER;
+	i915_execbuffer2_set_context_id(exec2, 0);
+
+	if (!i915_gem_check_execbuffer(&exec2))
+		return -EINVAL;
+
 	/* Copy in the exec list from userland */
-	exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL);
-	exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL);
+	exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list),
+				   __GFP_NOWARN | GFP_TEMPORARY);
+	exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
+				    __GFP_NOWARN | GFP_TEMPORARY);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
@@ -1880,12 +2393,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
		kvfree(exec2_list);
		return -ENOMEM;
	}
-	ret = copy_from_user(exec_list,
+	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
-	if (ret != 0) {
+	if (err) {
 		DRM_DEBUG("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
+			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
@@ -1903,99 +2416,94 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
			exec2_list[i].flags = 0;
	}
 
-	exec2.buffers_ptr = args->buffers_ptr;
-	exec2.buffer_count = args->buffer_count;
-	exec2.batch_start_offset = args->batch_start_offset;
-	exec2.batch_len = args->batch_len;
-	exec2.DR1 = args->DR1;
-	exec2.DR4 = args->DR4;
-	exec2.num_cliprects = args->num_cliprects;
-	exec2.cliprects_ptr = args->cliprects_ptr;
-	exec2.flags = I915_EXEC_RENDER;
-	i915_execbuffer2_set_context_id(exec2, 0);
-
-	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
-	if (!ret) {
+	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
+	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
 
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
+			if (!(exec2_list[i].offset & UPDATE))
+				continue;
+
 			exec2_list[i].offset =
-				gen8_canonical_addr(exec2_list[i].offset);
-			ret = __copy_to_user(&user_exec_list[i].offset,
-					     &exec2_list[i].offset,
-					     sizeof(user_exec_list[i].offset));
-			if (ret) {
-				ret = -EFAULT;
-				DRM_DEBUG("failed to copy %d exec entries "
-					  "back to user (%d)\n",
-					  args->buffer_count, ret);
+				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
+			exec2_list[i].offset &= PIN_OFFSET_MASK;
+			if (__copy_to_user(&user_exec_list[i].offset,
+					   &exec2_list[i].offset,
+					   sizeof(user_exec_list[i].offset)))
 				break;
-			}
		}
	}
 
	kvfree(exec_list);
	kvfree(exec2_list);
-	return ret;
+	return err;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
+	const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
 	struct drm_i915_gem_execbuffer2 *args = data;
-	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
-	int ret;
+	struct drm_i915_gem_exec_object2 *exec2_list;
+	int err;
 
-	if (args->buffer_count < 1 ||
-	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
+	if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
 
-	exec2_list = kvmalloc_array(args->buffer_count,
-				    sizeof(*exec2_list),
-				    GFP_TEMPORARY);
+	if (!i915_gem_check_execbuffer(args))
+		return -EINVAL;
+
+	/* Allocate an extra slot for use by the command parser */
+	exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
+				    __GFP_NOWARN | GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
-	ret = copy_from_user(exec2_list,
-			     u64_to_user_ptr(args->buffers_ptr),
-			     sizeof(*exec2_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_DEBUG("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
+	if (copy_from_user(exec2_list,
+			   u64_to_user_ptr(args->buffers_ptr),
+			   sizeof(*exec2_list) * args->buffer_count)) {
+		DRM_DEBUG("copy %d exec entries failed\n", args->buffer_count);
		kvfree(exec2_list);
		return -EFAULT;
	}
 
-	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
+	err = i915_gem_do_execbuffer(dev, file, args, exec2_list);
+
+	/*
+	 * Now that we have begun execution of the batchbuffer, we ignore
+	 * any new error after this point. Also given that we have already
+	 * updated the associated relocations, we try to write out the current
+	 * object locations irrespective of any error.
+	 */
+	if (args->flags & __EXEC_HAS_RELOC) {
 		struct drm_i915_gem_exec_object2 __user *user_exec_list =
-				   u64_to_user_ptr(args->buffers_ptr);
-		int i;
+			u64_to_user_ptr(args->buffers_ptr);
+		unsigned int i;
 
+		/* Copy the new buffer offsets back to the user's exec list. */
+		user_access_begin();
 		for (i = 0; i < args->buffer_count; i++) {
+			if (!(exec2_list[i].offset & UPDATE))
+				continue;
+
 			exec2_list[i].offset =
-				gen8_canonical_addr(exec2_list[i].offset);
-			ret = __copy_to_user(&user_exec_list[i].offset,
-					     &exec2_list[i].offset,
-					     sizeof(user_exec_list[i].offset));
-			if (ret) {
-				ret = -EFAULT;
-				DRM_DEBUG("failed to copy %d exec entries "
-					  "back to user\n",
-					  args->buffer_count);
-				break;
-			}
+				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
+			unsafe_put_user(exec2_list[i].offset,
+					&user_exec_list[i].offset,
+					end_user);
 		}
+end_user:
+		user_access_end();
 	}
 
+	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
 	kvfree(exec2_list);
-	return ret;
+	return err;
 }

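Both copy-back loops above only write entries whose offset carries the UPDATE flag, strip the low PIN_* bits with PIN_OFFSET_MASK, and convert the result to canonical form before handing it back to userspace. A minimal sketch of that canonicalisation, mirroring what gen8_canonical_addr() does (sign extension from bit 47 into bits 63:48):

#include <stdint.h>

static uint64_t canonical_addr_48(uint64_t address)
{
	/* shift bit 47 up to bit 63, then arithmetic-shift it back down */
	return (uint64_t)((int64_t)(address << 16) >> 16);
}
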
+ 13 - 3
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -1884,7 +1884,7 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
-	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
+	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev_priv))
@@ -3095,13 +3095,17 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
 
void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
+	GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
+
	i915->ggtt.invalidate = guc_ggtt_invalidate;
}
 
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
-	if (i915->ggtt.invalidate == guc_ggtt_invalidate)
-		i915->ggtt.invalidate = gen6_ggtt_invalidate;
+	/* We should only be called after i915_ggtt_enable_guc() */
+	GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
+
+	i915->ggtt.invalidate = gen6_ggtt_invalidate;
}

void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -3398,6 +3402,9 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
	if (err != -ENOSPC)
		return err;
 
+	if (flags & PIN_NOEVICT)
+		return -ENOSPC;
+
	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);
@@ -3512,6 +3519,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
	if (err != -ENOSPC)
		return err;
 
+	if (flags & PIN_NOEVICT)
+		return -ENOSPC;
+
	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between

+ 2 - 0
drivers/gpu/drm/i915/i915_gem_gtt.h

@@ -255,6 +255,7 @@ struct i915_address_space {
	struct drm_i915_file_private *file;
	struct list_head global_link;
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
+	u64 reserved;		/* size addr space reserved */
 
	bool closed;
 
@@ -588,6 +589,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_MAPPABLE		BIT(1)
#define PIN_ZONE_4G		BIT(2)
#define PIN_NONFAULT		BIT(3)
+#define PIN_NOEVICT		BIT(4)
 
#define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */

+ 3 - 1
drivers/gpu/drm/i915/i915_gem_internal.c

@@ -188,9 +188,11 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_object_internal_ops);
 
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+	obj->cache_coherent = i915_gem_object_is_coherent(obj);
+	obj->cache_dirty = !obj->cache_coherent;
 
	return obj;
}

+ 18 - 4
drivers/gpu/drm/i915/i915_gem_object.h

@@ -68,9 +68,25 @@ struct drm_i915_gem_object {
 
	const struct drm_i915_gem_object_ops *ops;
 
-	/** List of VMAs backed by this object */
+	/**
+	 * @vma_list: List of VMAs backed by this object
+	 *
+	 * The VMA on this list are ordered by type, all GGTT vma are placed
+	 * at the head and all ppGTT vma are placed at the tail. The different
+	 * types of GGTT vma are unordered between themselves, use the
+	 * @vma_tree (which has a defined order between all VMA) to find an
+	 * exact match.
+	 */
 	struct list_head vma_list;
+	/**
+	 * @vma_tree: Ordered tree of VMAs backed by this object
+	 *
+	 * All VMA created for this object are placed in the @vma_tree for
+	 * fast retrieval via a binary search in i915_vma_instance().
+	 * They are also added to @vma_list for easy iteration.
+	 */
 	struct rb_root vma_tree;
+	struct i915_vma *vma_hashed;
 
	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
@@ -85,9 +101,6 @@ struct drm_i915_gem_object {
	 */
	struct list_head userfault_link;
 
-	/** Used in execbuf to temporarily hold a ref */
-	struct list_head obj_exec_link;
-
	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);
 
@@ -106,6 +119,7 @@ struct drm_i915_gem_object {
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;
+	unsigned int cache_coherent:1;
 
	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */

+ 7 - 13
drivers/gpu/drm/i915/i915_gem_request.c

@@ -62,7 +62,7 @@ static bool i915_fence_enable_signaling(struct dma_fence *fence)
 		return false;
 
 	intel_engine_enable_signaling(to_request(fence), true);
-	return true;
+	return !i915_fence_signaled(fence);
}

static signed long i915_fence_wait(struct dma_fence *fence,
@@ -683,7 +683,6 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
-	u32 seqno;
	int ret;

	GEM_BUG_ON(to == from);
@@ -707,18 +706,14 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
		return ret < 0 ? ret : 0;
	}
 
-	seqno = i915_gem_request_global_seqno(from);
-	if (!seqno)
-		goto await_dma_fence;
+	if (to->engine->semaphore.sync_to) {
+		u32 seqno;
 
-	if (!to->engine->semaphore.sync_to) {
-		if (!__i915_gem_request_started(from, seqno))
-			goto await_dma_fence;
+		GEM_BUG_ON(!from->engine->semaphore.signal);
 
-		if (!__i915_spin_request(from, seqno, TASK_INTERRUPTIBLE, 2))
+		seqno = i915_gem_request_global_seqno(from);
+		if (!seqno)
 			goto await_dma_fence;
-	} else {
-		GEM_BUG_ON(!from->engine->semaphore.signal);
 
		if (seqno <= to->timeline->global_sync[from->engine->id])
			return 0;
@@ -729,10 +724,9 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 			return ret;
 
 		to->timeline->global_sync[from->engine->id] = seqno;
+		return 0;
 	}
 
-	return 0;
-
await_dma_fence:
	ret = i915_sw_fence_await_dma_fence(&to->submit,
					    &from->fence, 0,

+ 21 - 7
drivers/gpu/drm/i915/i915_gem_shrinker.c

@@ -38,16 +38,21 @@
static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
-	case MUTEX_TRYLOCK_FAILED:
-		return false;
-
-	case MUTEX_TRYLOCK_SUCCESS:
-		*unlock = true;
-		return true;
-
 	case MUTEX_TRYLOCK_RECURSIVE:
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
+	case MUTEX_TRYLOCK_FAILED:
+		do {
+			cpu_relax();
+			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+	case MUTEX_TRYLOCK_SUCCESS:
+				*unlock = true;
+				return true;
+			}
+		} while (!need_resched());
+
+		return false;
 	}
 
 	BUG();
@@ -332,6 +337,15 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 					 sc->nr_to_scan - freed,
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
+	if (freed < sc->nr_to_scan && current_is_kswapd()) {
+		intel_runtime_pm_get(dev_priv);
+		freed += i915_gem_shrink(dev_priv,
+					 sc->nr_to_scan - freed,
+					 I915_SHRINK_ACTIVE |
+					 I915_SHRINK_BOUND |
+					 I915_SHRINK_UNBOUND);
+		intel_runtime_pm_put(dev_priv);
+	}
 
 	shrinker_unlock(dev_priv, unlock);
 
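
The first shrinker_lock() hunk above turns MUTEX_TRYLOCK_FAILED into a bounded spin: the code retries mutex_trylock() with cpu_relax() until it either takes the lock or need_resched() says the CPU is wanted elsewhere. Upstream shares the MUTEX_TRYLOCK_SUCCESS path by placing that case label inside the loop; the sketch below unrolls the control flow for readability and uses a hypothetical example_lock instead of the driver's struct_mutex:

#include <linux/mutex.h>
#include <linux/sched.h>

static DEFINE_MUTEX(example_lock);	/* hypothetical */

static bool example_lock_spin(void)
{
	do {
		if (mutex_trylock(&example_lock))
			return true;	/* lock acquired */
		cpu_relax();		/* brief backoff between attempts */
	} while (!need_resched());	/* give up once preemption is due */

	return false;			/* caller skips the shrink pass */
}
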

+ 1 - 0
drivers/gpu/drm/i915/i915_gem_stolen.c

@@ -590,6 +590,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 	obj->stolen = stolen;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 	obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
+	obj->cache_coherent = true; /* assumptions! more like cache_oblivious */
 
 	if (i915_gem_object_pin_pages(obj))
 		goto cleanup;

+ 19 - 5
drivers/gpu/drm/i915/i915_gem_userptr.c

@@ -378,7 +378,7 @@ __i915_mm_struct_free(struct kref *kref)
 	mutex_unlock(&mm->i915->mm_lock);
 
 	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
-	schedule_work(&mm->work);
+	queue_work(mm->i915->mm.userptr_wq, &mm->work);
 }
 
 static void
@@ -598,7 +598,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 	get_task_struct(work->task);
 
 	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
-	schedule_work(&work->work);
+	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
 
 	return ERR_PTR(-EAGAIN);
 }
@@ -802,9 +802,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 	drm_gem_private_object_init(dev, &obj->base, args->user_size);
 	i915_gem_object_init(obj, &i915_gem_userptr_ops);
-	obj->cache_level = I915_CACHE_LLC;
-	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->cache_level = I915_CACHE_LLC;
+	obj->cache_coherent = i915_gem_object_is_coherent(obj);
+	obj->cache_dirty = !obj->cache_coherent;
 
 	obj->userptr.ptr = args->user_ptr;
 	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
@@ -828,8 +830,20 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	return 0;
 }
 
-void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
+int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 {
 	mutex_init(&dev_priv->mm_lock);
 	hash_init(dev_priv->mm_structs);
+
+	dev_priv->mm.userptr_wq =
+		alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
+	if (!dev_priv->mm.userptr_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
+{
+	destroy_workqueue(dev_priv->mm.userptr_wq);
 }
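
This hunk moves userptr page acquisition off the system workqueue onto a dedicated WQ_HIGHPRI queue with an explicit init/cleanup pair, so the work can be flushed and torn down independently of everything else on the system queue. A minimal sketch of that lifecycle under hypothetical names (example_wq, example_fn):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical */

static void example_fn(struct work_struct *work)
{
	/* deferred acquisition runs here */
}

static int example_init(void)
{
	example_wq = alloc_workqueue("example-acquire", WQ_HIGHPRI, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_queue(struct work_struct *work)
{
	INIT_WORK(work, example_fn);
	queue_work(example_wq, work);	/* instead of schedule_work() */
}

static void example_cleanup(void)
{
	destroy_workqueue(example_wq);	/* drains pending work first */
}
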

+ 1 - 1
drivers/gpu/drm/i915/i915_guc_submission.c

@@ -105,7 +105,7 @@ static int __reserve_doorbell(struct i915_guc_client *client)
 		end += offset;
 	}
 
-	id = find_next_zero_bit(client->guc->doorbell_bitmap, offset, end);
+	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
 	if (id == end)
 		return -ENOSPC;
 
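
The one-line fix above restores find_next_zero_bit()'s argument order, which is bitmap first, then the size to search up to, then the offset to start from; with the last two arguments swapped, the search covered the wrong range. A small sketch of the corrected usage against a hypothetical bitmap:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_BITS 256

static DECLARE_BITMAP(example_bitmap, EXAMPLE_BITS);	/* hypothetical */

static int example_reserve(unsigned long offset, unsigned long end)
{
	unsigned long id;

	/* search [offset, end) for a free slot: (addr, size, offset) */
	id = find_next_zero_bit(example_bitmap, end, offset);
	if (id == end)
		return -ENOSPC;

	__set_bit(id, example_bitmap);
	return id;
}
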

+ 4 - 2
drivers/gpu/drm/i915/i915_irq.c

@@ -2548,7 +2548,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			I915_WRITE(SDEIIR, iir);
 			ret = IRQ_HANDLED;
 
-			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
+			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
+			    HAS_PCH_CNP(dev_priv))
 				spt_irq_handler(dev_priv, iir);
 			else
 				cpt_irq_handler(dev_priv, iir);
@@ -4289,7 +4290,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->disable_vblank = gen8_disable_vblank;
 		if (IS_GEN9_LP(dev_priv))
 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
+		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
+			 HAS_PCH_CNP(dev_priv))
 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
 		else
 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;

+ 5376 - 0
drivers/gpu/drm/i915/i915_oa_bdw.c

@@ -0,0 +1,5376 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_bdw.h"
+
+enum metric_set_id {
+	METRIC_SET_ID_RENDER_BASIC = 1,
+	METRIC_SET_ID_COMPUTE_BASIC,
+	METRIC_SET_ID_RENDER_PIPE_PROFILE,
+	METRIC_SET_ID_MEMORY_READS,
+	METRIC_SET_ID_MEMORY_WRITES,
+	METRIC_SET_ID_COMPUTE_EXTENDED,
+	METRIC_SET_ID_COMPUTE_L3_CACHE,
+	METRIC_SET_ID_DATA_PORT_READS_COALESCING,
+	METRIC_SET_ID_DATA_PORT_WRITES_COALESCING,
+	METRIC_SET_ID_HDC_AND_SF,
+	METRIC_SET_ID_L3_1,
+	METRIC_SET_ID_L3_2,
+	METRIC_SET_ID_L3_3,
+	METRIC_SET_ID_L3_4,
+	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
+	METRIC_SET_ID_SAMPLER_1,
+	METRIC_SET_ID_SAMPLER_2,
+	METRIC_SET_ID_TDL_1,
+	METRIC_SET_ID_TDL_2,
+	METRIC_SET_ID_COMPUTE_EXTRA,
+	METRIC_SET_ID_VME_PIPE,
+	METRIC_SET_ID_TEST_OA,
+};
+
+int i915_oa_n_builtin_metric_sets_bdw = 22;
+
+static const struct i915_oa_reg b_counter_config_render_basic[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_render_basic[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_render_basic_0_slices_0x01[] = {
+	{ _MMIO(0x9888), 0x143f000f },
+	{ _MMIO(0x9888), 0x14110014 },
+	{ _MMIO(0x9888), 0x14310014 },
+	{ _MMIO(0x9888), 0x14bf000f },
+	{ _MMIO(0x9888), 0x118a0317 },
+	{ _MMIO(0x9888), 0x13837be0 },
+	{ _MMIO(0x9888), 0x3b800060 },
+	{ _MMIO(0x9888), 0x3d800005 },
+	{ _MMIO(0x9888), 0x005c4000 },
+	{ _MMIO(0x9888), 0x065c8000 },
+	{ _MMIO(0x9888), 0x085cc000 },
+	{ _MMIO(0x9888), 0x003d8000 },
+	{ _MMIO(0x9888), 0x183d0800 },
+	{ _MMIO(0x9888), 0x0a3f0023 },
+	{ _MMIO(0x9888), 0x103f0000 },
+	{ _MMIO(0x9888), 0x00584000 },
+	{ _MMIO(0x9888), 0x08584000 },
+	{ _MMIO(0x9888), 0x0a5a4000 },
+	{ _MMIO(0x9888), 0x005b4000 },
+	{ _MMIO(0x9888), 0x0e5b8000 },
+	{ _MMIO(0x9888), 0x185b2400 },
+	{ _MMIO(0x9888), 0x0a1d4000 },
+	{ _MMIO(0x9888), 0x0c1f0800 },
+	{ _MMIO(0x9888), 0x0e1faa00 },
+	{ _MMIO(0x9888), 0x00384000 },
+	{ _MMIO(0x9888), 0x0e384000 },
+	{ _MMIO(0x9888), 0x16384000 },
+	{ _MMIO(0x9888), 0x18380001 },
+	{ _MMIO(0x9888), 0x00392000 },
+	{ _MMIO(0x9888), 0x06398000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a391000 },
+	{ _MMIO(0x9888), 0x00104000 },
+	{ _MMIO(0x9888), 0x08104000 },
+	{ _MMIO(0x9888), 0x00110030 },
+	{ _MMIO(0x9888), 0x08110031 },
+	{ _MMIO(0x9888), 0x10110000 },
+	{ _MMIO(0x9888), 0x00134000 },
+	{ _MMIO(0x9888), 0x16130020 },
+	{ _MMIO(0x9888), 0x06308000 },
+	{ _MMIO(0x9888), 0x08308000 },
+	{ _MMIO(0x9888), 0x06311800 },
+	{ _MMIO(0x9888), 0x08311880 },
+	{ _MMIO(0x9888), 0x10310000 },
+	{ _MMIO(0x9888), 0x0e334000 },
+	{ _MMIO(0x9888), 0x16330080 },
+	{ _MMIO(0x9888), 0x0abf1180 },
+	{ _MMIO(0x9888), 0x10bf0000 },
+	{ _MMIO(0x9888), 0x0ada8000 },
+	{ _MMIO(0x9888), 0x0a9d8000 },
+	{ _MMIO(0x9888), 0x109f0002 },
+	{ _MMIO(0x9888), 0x0ab94000 },
+	{ _MMIO(0x9888), 0x0d888000 },
+	{ _MMIO(0x9888), 0x038a0380 },
+	{ _MMIO(0x9888), 0x058a000e },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8a00a0 },
+	{ _MMIO(0x9888), 0x078a0000 },
+	{ _MMIO(0x9888), 0x098a0000 },
+	{ _MMIO(0x9888), 0x238b2820 },
+	{ _MMIO(0x9888), 0x258b2550 },
+	{ _MMIO(0x9888), 0x198c1000 },
+	{ _MMIO(0x9888), 0x0b8d8000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa0 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x0d831021 },
+	{ _MMIO(0x9888), 0x0f83572f },
+	{ _MMIO(0x9888), 0x01835680 },
+	{ _MMIO(0x9888), 0x0383002c },
+	{ _MMIO(0x9888), 0x11830000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830001 },
+	{ _MMIO(0x9888), 0x05830000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x05844000 },
+	{ _MMIO(0x9888), 0x1b80c137 },
+	{ _MMIO(0x9888), 0x1d80c147 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x15804000 },
+	{ _MMIO(0x9888), 0x4d801110 },
+	{ _MMIO(0x9888), 0x4f800331 },
+	{ _MMIO(0x9888), 0x43800802 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45801465 },
+	{ _MMIO(0x9888), 0x53801111 },
+	{ _MMIO(0x9888), 0x478014a5 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800ca5 },
+	{ _MMIO(0x9888), 0x41800003 },
+};
+
+static const struct i915_oa_reg mux_config_render_basic_1_slices_0x02[] = {
+	{ _MMIO(0x9888), 0x143f000f },
+	{ _MMIO(0x9888), 0x14bf000f },
+	{ _MMIO(0x9888), 0x14910014 },
+	{ _MMIO(0x9888), 0x14b10014 },
+	{ _MMIO(0x9888), 0x118a0317 },
+	{ _MMIO(0x9888), 0x13837be0 },
+	{ _MMIO(0x9888), 0x3b800060 },
+	{ _MMIO(0x9888), 0x3d800005 },
+	{ _MMIO(0x9888), 0x0a3f0023 },
+	{ _MMIO(0x9888), 0x103f0000 },
+	{ _MMIO(0x9888), 0x0a5a4000 },
+	{ _MMIO(0x9888), 0x0a1d4000 },
+	{ _MMIO(0x9888), 0x0e1f8000 },
+	{ _MMIO(0x9888), 0x0a391000 },
+	{ _MMIO(0x9888), 0x00dc4000 },
+	{ _MMIO(0x9888), 0x06dc8000 },
+	{ _MMIO(0x9888), 0x08dcc000 },
+	{ _MMIO(0x9888), 0x00bd8000 },
+	{ _MMIO(0x9888), 0x18bd0800 },
+	{ _MMIO(0x9888), 0x0abf1180 },
+	{ _MMIO(0x9888), 0x10bf0000 },
+	{ _MMIO(0x9888), 0x00d84000 },
+	{ _MMIO(0x9888), 0x08d84000 },
+	{ _MMIO(0x9888), 0x0ada8000 },
+	{ _MMIO(0x9888), 0x00db4000 },
+	{ _MMIO(0x9888), 0x0edb8000 },
+	{ _MMIO(0x9888), 0x18db2400 },
+	{ _MMIO(0x9888), 0x0a9d8000 },
+	{ _MMIO(0x9888), 0x0c9f0800 },
+	{ _MMIO(0x9888), 0x0e9f2a00 },
+	{ _MMIO(0x9888), 0x109f0002 },
+	{ _MMIO(0x9888), 0x00b84000 },
+	{ _MMIO(0x9888), 0x0eb84000 },
+	{ _MMIO(0x9888), 0x16b84000 },
+	{ _MMIO(0x9888), 0x18b80001 },
+	{ _MMIO(0x9888), 0x00b92000 },
+	{ _MMIO(0x9888), 0x06b98000 },
+	{ _MMIO(0x9888), 0x08b9a000 },
+	{ _MMIO(0x9888), 0x0ab94000 },
+	{ _MMIO(0x9888), 0x00904000 },
+	{ _MMIO(0x9888), 0x08904000 },
+	{ _MMIO(0x9888), 0x00910030 },
+	{ _MMIO(0x9888), 0x08910031 },
+	{ _MMIO(0x9888), 0x10910000 },
+	{ _MMIO(0x9888), 0x00934000 },
+	{ _MMIO(0x9888), 0x16930020 },
+	{ _MMIO(0x9888), 0x06b08000 },
+	{ _MMIO(0x9888), 0x08b08000 },
+	{ _MMIO(0x9888), 0x06b11800 },
+	{ _MMIO(0x9888), 0x08b11880 },
+	{ _MMIO(0x9888), 0x10b10000 },
+	{ _MMIO(0x9888), 0x0eb34000 },
+	{ _MMIO(0x9888), 0x16b30080 },
+	{ _MMIO(0x9888), 0x01888000 },
+	{ _MMIO(0x9888), 0x0d88b800 },
+	{ _MMIO(0x9888), 0x038a0380 },
+	{ _MMIO(0x9888), 0x058a000e },
+	{ _MMIO(0x9888), 0x1b8a0080 },
+	{ _MMIO(0x9888), 0x078a0000 },
+	{ _MMIO(0x9888), 0x098a0000 },
+	{ _MMIO(0x9888), 0x238b2840 },
+	{ _MMIO(0x9888), 0x258b26a0 },
+	{ _MMIO(0x9888), 0x018c4000 },
+	{ _MMIO(0x9888), 0x0f8c4000 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c1100 },
+	{ _MMIO(0x9888), 0x018d2000 },
+	{ _MMIO(0x9888), 0x078d8000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8d8000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa0 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x0d831021 },
+	{ _MMIO(0x9888), 0x0f83572f },
+	{ _MMIO(0x9888), 0x01835680 },
+	{ _MMIO(0x9888), 0x0383002c },
+	{ _MMIO(0x9888), 0x11830000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830001 },
+	{ _MMIO(0x9888), 0x05830000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x05844000 },
+	{ _MMIO(0x9888), 0x1b80c137 },
+	{ _MMIO(0x9888), 0x1d80c147 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x15804000 },
+	{ _MMIO(0x9888), 0x4d801550 },
+	{ _MMIO(0x9888), 0x4f800331 },
+	{ _MMIO(0x9888), 0x43800802 },
+	{ _MMIO(0x9888), 0x51800400 },
+	{ _MMIO(0x9888), 0x458004a1 },
+	{ _MMIO(0x9888), 0x53805555 },
+	{ _MMIO(0x9888), 0x47800421 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f801421 },
+	{ _MMIO(0x9888), 0x41800845 },
+};
+
+static int
+get_render_basic_mux_config(struct drm_i915_private *dev_priv,
+			    const struct i915_oa_reg **regs,
+			    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
+
+	if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) {
+		regs[n] = mux_config_render_basic_0_slices_0x01;
+		lens[n] = ARRAY_SIZE(mux_config_render_basic_0_slices_0x01);
+		n++;
+	}
+	if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x02) {
+		regs[n] = mux_config_render_basic_1_slices_0x02;
+		lens[n] = ARRAY_SIZE(mux_config_render_basic_1_slices_0x02);
+		n++;
+	}
+
+	return n;
+}
+
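
The get_*_mux_config() helpers in this autogenerated file all follow the same mask-gated table selection: each bit present in the hardware's slice/subslice mask appends one register table to the caller's regs[]/lens[] output arrays, in bit order. A standalone sketch of the pattern with hypothetical names and tables:

#include <linux/kernel.h>

struct example_reg { u32 addr; u32 value; };	/* hypothetical */

static const struct example_reg example_table_slice0[] = {
	{ 0x9888, 0x143f000f },
};

static const struct example_reg example_table_slice1[] = {
	{ 0x9888, 0x14bf000f },
};

static int example_select(u32 slice_mask,
			  const struct example_reg **regs, int *lens)
{
	int n = 0;

	if (slice_mask & BIT(0)) {
		regs[n] = example_table_slice0;
		lens[n] = ARRAY_SIZE(example_table_slice0);
		n++;
	}
	if (slice_mask & BIT(1)) {
		regs[n] = example_table_slice1;
		lens[n] = ARRAY_SIZE(example_table_slice1);
		n++;
	}

	return n;	/* tables must be programmed in this order */
}
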
+static const struct i915_oa_reg b_counter_config_compute_basic[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_compute_basic_0_slices_0x01[] = {
+	{ _MMIO(0x9888), 0x105c00e0 },
+	{ _MMIO(0x9888), 0x105800e0 },
+	{ _MMIO(0x9888), 0x103800e0 },
+	{ _MMIO(0x9888), 0x3580001a },
+	{ _MMIO(0x9888), 0x3b800060 },
+	{ _MMIO(0x9888), 0x3d800005 },
+	{ _MMIO(0x9888), 0x065c2100 },
+	{ _MMIO(0x9888), 0x0a5c0041 },
+	{ _MMIO(0x9888), 0x0c5c6600 },
+	{ _MMIO(0x9888), 0x005c6580 },
+	{ _MMIO(0x9888), 0x085c8000 },
+	{ _MMIO(0x9888), 0x0e5c8000 },
+	{ _MMIO(0x9888), 0x00580042 },
+	{ _MMIO(0x9888), 0x08582080 },
+	{ _MMIO(0x9888), 0x0c58004c },
+	{ _MMIO(0x9888), 0x0e582580 },
+	{ _MMIO(0x9888), 0x005b4000 },
+	{ _MMIO(0x9888), 0x185b1000 },
+	{ _MMIO(0x9888), 0x1a5b0104 },
+	{ _MMIO(0x9888), 0x0c1fa800 },
+	{ _MMIO(0x9888), 0x0e1faa00 },
+	{ _MMIO(0x9888), 0x101f02aa },
+	{ _MMIO(0x9888), 0x08380042 },
+	{ _MMIO(0x9888), 0x0a382080 },
+	{ _MMIO(0x9888), 0x0e38404c },
+	{ _MMIO(0x9888), 0x0238404b },
+	{ _MMIO(0x9888), 0x00384000 },
+	{ _MMIO(0x9888), 0x16380000 },
+	{ _MMIO(0x9888), 0x18381145 },
+	{ _MMIO(0x9888), 0x04380000 },
+	{ _MMIO(0x9888), 0x0039a000 },
+	{ _MMIO(0x9888), 0x06398000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0c39a000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x02392000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8aaaa0 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x238b02a0 },
+	{ _MMIO(0x9888), 0x258b5550 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x1f850a80 },
+	{ _MMIO(0x9888), 0x2185aaa0 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x03844000 },
+	{ _MMIO(0x9888), 0x17808137 },
+	{ _MMIO(0x9888), 0x1980c147 },
+	{ _MMIO(0x9888), 0x1b80c0e5 },
+	{ _MMIO(0x9888), 0x1d80c0e3 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x13804000 },
+	{ _MMIO(0x9888), 0x15800000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d801000 },
+	{ _MMIO(0x9888), 0x4f800111 },
+	{ _MMIO(0x9888), 0x43800062 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800062 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800062 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f801062 },
+	{ _MMIO(0x9888), 0x41801084 },
+};
+
+static const struct i915_oa_reg mux_config_compute_basic_2_slices_0x02[] = {
+	{ _MMIO(0x9888), 0x10dc00e0 },
+	{ _MMIO(0x9888), 0x10d800e0 },
+	{ _MMIO(0x9888), 0x10b800e0 },
+	{ _MMIO(0x9888), 0x3580001a },
+	{ _MMIO(0x9888), 0x3b800060 },
+	{ _MMIO(0x9888), 0x3d800005 },
+	{ _MMIO(0x9888), 0x06dc2100 },
+	{ _MMIO(0x9888), 0x0adc0041 },
+	{ _MMIO(0x9888), 0x0cdc6600 },
+	{ _MMIO(0x9888), 0x00dc6580 },
+	{ _MMIO(0x9888), 0x08dc8000 },
+	{ _MMIO(0x9888), 0x0edc8000 },
+	{ _MMIO(0x9888), 0x00d80042 },
+	{ _MMIO(0x9888), 0x08d82080 },
+	{ _MMIO(0x9888), 0x0cd8004c },
+	{ _MMIO(0x9888), 0x0ed82580 },
+	{ _MMIO(0x9888), 0x00db4000 },
+	{ _MMIO(0x9888), 0x18db1000 },
+	{ _MMIO(0x9888), 0x1adb0104 },
+	{ _MMIO(0x9888), 0x0c9fa800 },
+	{ _MMIO(0x9888), 0x0e9faa00 },
+	{ _MMIO(0x9888), 0x109f02aa },
+	{ _MMIO(0x9888), 0x08b80042 },
+	{ _MMIO(0x9888), 0x0ab82080 },
+	{ _MMIO(0x9888), 0x0eb8404c },
+	{ _MMIO(0x9888), 0x02b8404b },
+	{ _MMIO(0x9888), 0x00b84000 },
+	{ _MMIO(0x9888), 0x16b80000 },
+	{ _MMIO(0x9888), 0x18b81145 },
+	{ _MMIO(0x9888), 0x04b80000 },
+	{ _MMIO(0x9888), 0x00b9a000 },
+	{ _MMIO(0x9888), 0x06b98000 },
+	{ _MMIO(0x9888), 0x08b9a000 },
+	{ _MMIO(0x9888), 0x0ab9a000 },
+	{ _MMIO(0x9888), 0x0cb9a000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x02b92000 },
+	{ _MMIO(0x9888), 0x01888000 },
+	{ _MMIO(0x9888), 0x0d88f800 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x238b0540 },
+	{ _MMIO(0x9888), 0x258baaa0 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x018c4000 },
+	{ _MMIO(0x9888), 0x0f8c4000 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c5500 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x018da000 },
+	{ _MMIO(0x9888), 0x078d8000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x038d2000 },
+	{ _MMIO(0x9888), 0x1f850a80 },
+	{ _MMIO(0x9888), 0x2185aaa0 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x03844000 },
+	{ _MMIO(0x9888), 0x17808137 },
+	{ _MMIO(0x9888), 0x1980c147 },
+	{ _MMIO(0x9888), 0x1b80c0e5 },
+	{ _MMIO(0x9888), 0x1d80c0e3 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x13804000 },
+	{ _MMIO(0x9888), 0x15800000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d805000 },
+	{ _MMIO(0x9888), 0x4f800555 },
+	{ _MMIO(0x9888), 0x43800062 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800062 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800062 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800062 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
+static int
+get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
+
+	if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) {
+		regs[n] = mux_config_compute_basic_0_slices_0x01;
+		lens[n] = ARRAY_SIZE(mux_config_compute_basic_0_slices_0x01);
+		n++;
+	}
+	if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x02) {
+		regs[n] = mux_config_compute_basic_2_slices_0x02;
+		lens[n] = ARRAY_SIZE(mux_config_compute_basic_2_slices_0x02);
+		n++;
+	}
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007ffea },
+	{ _MMIO(0x2774), 0x00007ffc },
+	{ _MMIO(0x2778), 0x0007affa },
+	{ _MMIO(0x277c), 0x0000f5fd },
+	{ _MMIO(0x2780), 0x00079ffa },
+	{ _MMIO(0x2784), 0x0000f3fb },
+	{ _MMIO(0x2788), 0x0007bf7a },
+	{ _MMIO(0x278c), 0x0000f7e7 },
+	{ _MMIO(0x2790), 0x0007fefa },
+	{ _MMIO(0x2794), 0x0000f7cf },
+	{ _MMIO(0x2798), 0x00077ffa },
+	{ _MMIO(0x279c), 0x0000efdf },
+	{ _MMIO(0x27a0), 0x0006fffa },
+	{ _MMIO(0x27a4), 0x0000cfbf },
+	{ _MMIO(0x27a8), 0x0003fffa },
+	{ _MMIO(0x27ac), 0x00005f7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
+	{ _MMIO(0x9888), 0x0a1e0000 },
+	{ _MMIO(0x9888), 0x0c1f000f },
+	{ _MMIO(0x9888), 0x10176800 },
+	{ _MMIO(0x9888), 0x1191001f },
+	{ _MMIO(0x9888), 0x0b880320 },
+	{ _MMIO(0x9888), 0x01890c40 },
+	{ _MMIO(0x9888), 0x118a1c00 },
+	{ _MMIO(0x9888), 0x118d7c00 },
+	{ _MMIO(0x9888), 0x118e0020 },
+	{ _MMIO(0x9888), 0x118f4c00 },
+	{ _MMIO(0x9888), 0x11900000 },
+	{ _MMIO(0x9888), 0x13900001 },
+	{ _MMIO(0x9888), 0x065c4000 },
+	{ _MMIO(0x9888), 0x0c3d8000 },
+	{ _MMIO(0x9888), 0x06584000 },
+	{ _MMIO(0x9888), 0x0c5b4000 },
+	{ _MMIO(0x9888), 0x081e0040 },
+	{ _MMIO(0x9888), 0x0e1e0000 },
+	{ _MMIO(0x9888), 0x021f5400 },
+	{ _MMIO(0x9888), 0x001f0000 },
+	{ _MMIO(0x9888), 0x101f0010 },
+	{ _MMIO(0x9888), 0x0e1f0080 },
+	{ _MMIO(0x9888), 0x0c384000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x0c13c000 },
+	{ _MMIO(0x9888), 0x06164000 },
+	{ _MMIO(0x9888), 0x06170012 },
+	{ _MMIO(0x9888), 0x00170000 },
+	{ _MMIO(0x9888), 0x01910005 },
+	{ _MMIO(0x9888), 0x07880002 },
+	{ _MMIO(0x9888), 0x01880c00 },
+	{ _MMIO(0x9888), 0x0f880000 },
+	{ _MMIO(0x9888), 0x0d880000 },
+	{ _MMIO(0x9888), 0x05880000 },
+	{ _MMIO(0x9888), 0x09890032 },
+	{ _MMIO(0x9888), 0x078a0800 },
+	{ _MMIO(0x9888), 0x0f8a0a00 },
+	{ _MMIO(0x9888), 0x198a4000 },
+	{ _MMIO(0x9888), 0x1b8a2000 },
+	{ _MMIO(0x9888), 0x1d8a0000 },
+	{ _MMIO(0x9888), 0x038a4000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x238b54c0 },
+	{ _MMIO(0x9888), 0x258baa55 },
+	{ _MMIO(0x9888), 0x278b0019 },
+	{ _MMIO(0x9888), 0x198c0100 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x0f8d0015 },
+	{ _MMIO(0x9888), 0x018d1000 },
+	{ _MMIO(0x9888), 0x098d8000 },
+	{ _MMIO(0x9888), 0x0b8df000 },
+	{ _MMIO(0x9888), 0x0d8d3000 },
+	{ _MMIO(0x9888), 0x038de000 },
+	{ _MMIO(0x9888), 0x058d3000 },
+	{ _MMIO(0x9888), 0x0d8e0004 },
+	{ _MMIO(0x9888), 0x058e000c },
+	{ _MMIO(0x9888), 0x098e0000 },
+	{ _MMIO(0x9888), 0x078e0000 },
+	{ _MMIO(0x9888), 0x038e0000 },
+	{ _MMIO(0x9888), 0x0b8f0020 },
+	{ _MMIO(0x9888), 0x198f0c00 },
+	{ _MMIO(0x9888), 0x078f8000 },
+	{ _MMIO(0x9888), 0x098f4000 },
+	{ _MMIO(0x9888), 0x0b900980 },
+	{ _MMIO(0x9888), 0x03900d80 },
+	{ _MMIO(0x9888), 0x01900000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d801111 },
+	{ _MMIO(0x9888), 0x3d800800 },
+	{ _MMIO(0x9888), 0x4f801011 },
+	{ _MMIO(0x9888), 0x43800443 },
+	{ _MMIO(0x9888), 0x51801111 },
+	{ _MMIO(0x9888), 0x45800422 },
+	{ _MMIO(0x9888), 0x53801111 },
+	{ _MMIO(0x9888), 0x47800c60 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800422 },
+	{ _MMIO(0x9888), 0x41800021 },
+};
+
+static int
+get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
+				   const struct i915_oa_reg **regs,
+				   int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_render_pipe_profile;
+	lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_reads[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x274c), 0x86543210 },
+	{ _MMIO(0x2748), 0x86543210 },
+	{ _MMIO(0x2744), 0x00006667 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x275c), 0x86543210 },
+	{ _MMIO(0x2758), 0x86543210 },
+	{ _MMIO(0x2754), 0x00006465 },
+	{ _MMIO(0x2750), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007f81a },
+	{ _MMIO(0x2774), 0x0000fe00 },
+	{ _MMIO(0x2778), 0x0007f82a },
+	{ _MMIO(0x277c), 0x0000fe00 },
+	{ _MMIO(0x2780), 0x0007f872 },
+	{ _MMIO(0x2784), 0x0000fe00 },
+	{ _MMIO(0x2788), 0x0007f8ba },
+	{ _MMIO(0x278c), 0x0000fe00 },
+	{ _MMIO(0x2790), 0x0007f87a },
+	{ _MMIO(0x2794), 0x0000fe00 },
+	{ _MMIO(0x2798), 0x0007f8ea },
+	{ _MMIO(0x279c), 0x0000fe00 },
+	{ _MMIO(0x27a0), 0x0007f8e2 },
+	{ _MMIO(0x27a4), 0x0000fe00 },
+	{ _MMIO(0x27a8), 0x0007f8f2 },
+	{ _MMIO(0x27ac), 0x0000fe00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_memory_reads[] = {
+	{ _MMIO(0x9888), 0x198b0343 },
+	{ _MMIO(0x9888), 0x13845800 },
+	{ _MMIO(0x9888), 0x15840018 },
+	{ _MMIO(0x9888), 0x3580001a },
+	{ _MMIO(0x9888), 0x038b6300 },
+	{ _MMIO(0x9888), 0x058b6b62 },
+	{ _MMIO(0x9888), 0x078b006a },
+	{ _MMIO(0x9888), 0x118b0000 },
+	{ _MMIO(0x9888), 0x238b0000 },
+	{ _MMIO(0x9888), 0x258b0000 },
+	{ _MMIO(0x9888), 0x1f85a080 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385000a },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x01840018 },
+	{ _MMIO(0x9888), 0x07844c80 },
+	{ _MMIO(0x9888), 0x09840d9a },
+	{ _MMIO(0x9888), 0x0b840e9c },
+	{ _MMIO(0x9888), 0x0d840f9e },
+	{ _MMIO(0x9888), 0x0f840010 },
+	{ _MMIO(0x9888), 0x11840000 },
+	{ _MMIO(0x9888), 0x03848000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x2f8000e5 },
+	{ _MMIO(0x9888), 0x138080e3 },
+	{ _MMIO(0x9888), 0x1580c0e1 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x11804000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f804000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3d800800 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800842 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800842 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47801042 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800084 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
+static int
+get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
+			    const struct i915_oa_reg **regs,
+			    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_memory_reads;
+	lens[n] = ARRAY_SIZE(mux_config_memory_reads);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_writes[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x274c), 0x86543210 },
+	{ _MMIO(0x2748), 0x86543210 },
+	{ _MMIO(0x2744), 0x00006667 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x275c), 0x86543210 },
+	{ _MMIO(0x2758), 0x86543210 },
+	{ _MMIO(0x2754), 0x00006465 },
+	{ _MMIO(0x2750), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007f81a },
+	{ _MMIO(0x2774), 0x0000fe00 },
+	{ _MMIO(0x2778), 0x0007f82a },
+	{ _MMIO(0x277c), 0x0000fe00 },
+	{ _MMIO(0x2780), 0x0007f822 },
+	{ _MMIO(0x2784), 0x0000fe00 },
+	{ _MMIO(0x2788), 0x0007f8ba },
+	{ _MMIO(0x278c), 0x0000fe00 },
+	{ _MMIO(0x2790), 0x0007f87a },
+	{ _MMIO(0x2794), 0x0000fe00 },
+	{ _MMIO(0x2798), 0x0007f8ea },
+	{ _MMIO(0x279c), 0x0000fe00 },
+	{ _MMIO(0x27a0), 0x0007f8e2 },
+	{ _MMIO(0x27a4), 0x0000fe00 },
+	{ _MMIO(0x27a8), 0x0007f8f2 },
+	{ _MMIO(0x27ac), 0x0000fe00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_memory_writes[] = {
+	{ _MMIO(0x9888), 0x198b0343 },
+	{ _MMIO(0x9888), 0x13845400 },
+	{ _MMIO(0x9888), 0x3580001a },
+	{ _MMIO(0x9888), 0x3d800805 },
+	{ _MMIO(0x9888), 0x038b6300 },
+	{ _MMIO(0x9888), 0x058b6b62 },
+	{ _MMIO(0x9888), 0x078b006a },
+	{ _MMIO(0x9888), 0x118b0000 },
+	{ _MMIO(0x9888), 0x238b0000 },
+	{ _MMIO(0x9888), 0x258b0000 },
+	{ _MMIO(0x9888), 0x1f85a080 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x23850002 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x01840010 },
+	{ _MMIO(0x9888), 0x07844880 },
+	{ _MMIO(0x9888), 0x09840992 },
+	{ _MMIO(0x9888), 0x0b840a94 },
+	{ _MMIO(0x9888), 0x0d840b96 },
+	{ _MMIO(0x9888), 0x11840000 },
+	{ _MMIO(0x9888), 0x03848000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x2d800147 },
+	{ _MMIO(0x9888), 0x2f8000e5 },
+	{ _MMIO(0x9888), 0x138080e3 },
+	{ _MMIO(0x9888), 0x1580c0e1 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x11804000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f800000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800842 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800842 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47801082 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800084 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
+static int
+get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_memory_writes;
+	lens[n] = ARRAY_SIZE(mux_config_memory_writes);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_extended[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007fc2a },
+	{ _MMIO(0x2774), 0x0000bf00 },
+	{ _MMIO(0x2778), 0x0007fc6a },
+	{ _MMIO(0x277c), 0x0000bf00 },
+	{ _MMIO(0x2780), 0x0007fc92 },
+	{ _MMIO(0x2784), 0x0000bf00 },
+	{ _MMIO(0x2788), 0x0007fca2 },
+	{ _MMIO(0x278c), 0x0000bf00 },
+	{ _MMIO(0x2790), 0x0007fc32 },
+	{ _MMIO(0x2794), 0x0000bf00 },
+	{ _MMIO(0x2798), 0x0007fc9a },
+	{ _MMIO(0x279c), 0x0000bf00 },
+	{ _MMIO(0x27a0), 0x0007fe6a },
+	{ _MMIO(0x27a4), 0x0000bf00 },
+	{ _MMIO(0x27a8), 0x0007fe7a },
+	{ _MMIO(0x27ac), 0x0000bf00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended_0_subslices_0x01[] = {
+	{ _MMIO(0x9888), 0x143d0160 },
+	{ _MMIO(0x9888), 0x163d2800 },
+	{ _MMIO(0x9888), 0x183d0120 },
+	{ _MMIO(0x9888), 0x105800e0 },
+	{ _MMIO(0x9888), 0x005cc000 },
+	{ _MMIO(0x9888), 0x065c8000 },
+	{ _MMIO(0x9888), 0x085cc000 },
+	{ _MMIO(0x9888), 0x0a5cc000 },
+	{ _MMIO(0x9888), 0x0c5cc000 },
+	{ _MMIO(0x9888), 0x0e5cc000 },
+	{ _MMIO(0x9888), 0x025cc000 },
+	{ _MMIO(0x9888), 0x045cc000 },
+	{ _MMIO(0x9888), 0x003d0011 },
+	{ _MMIO(0x9888), 0x063d0900 },
+	{ _MMIO(0x9888), 0x083d0a13 },
+	{ _MMIO(0x9888), 0x0a3d0b15 },
+	{ _MMIO(0x9888), 0x0c3d2317 },
+	{ _MMIO(0x9888), 0x043d21b7 },
+	{ _MMIO(0x9888), 0x103d0000 },
+	{ _MMIO(0x9888), 0x0e3d0000 },
+	{ _MMIO(0x9888), 0x1a3d0000 },
+	{ _MMIO(0x9888), 0x0e5825c1 },
+	{ _MMIO(0x9888), 0x00586100 },
+	{ _MMIO(0x9888), 0x0258204c },
+	{ _MMIO(0x9888), 0x06588000 },
+	{ _MMIO(0x9888), 0x0858c000 },
+	{ _MMIO(0x9888), 0x0a58c000 },
+	{ _MMIO(0x9888), 0x0c58c000 },
+	{ _MMIO(0x9888), 0x0458c000 },
+	{ _MMIO(0x9888), 0x005b4000 },
+	{ _MMIO(0x9888), 0x0e5b4000 },
+	{ _MMIO(0x9888), 0x185b5400 },
+	{ _MMIO(0x9888), 0x1a5b0155 },
+	{ _MMIO(0x9888), 0x025b4000 },
+	{ _MMIO(0x9888), 0x045b4000 },
+	{ _MMIO(0x9888), 0x065b4000 },
+	{ _MMIO(0x9888), 0x085b4000 },
+	{ _MMIO(0x9888), 0x0a5b4000 },
+	{ _MMIO(0x9888), 0x0c1fa800 },
+	{ _MMIO(0x9888), 0x0e1faa2a },
+	{ _MMIO(0x9888), 0x101f02aa },
+	{ _MMIO(0x9888), 0x00384000 },
+	{ _MMIO(0x9888), 0x0e384000 },
+	{ _MMIO(0x9888), 0x16384000 },
+	{ _MMIO(0x9888), 0x18381555 },
+	{ _MMIO(0x9888), 0x02384000 },
+	{ _MMIO(0x9888), 0x04384000 },
+	{ _MMIO(0x9888), 0x06384000 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0039a000 },
+	{ _MMIO(0x9888), 0x06398000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0c39a000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8aaaa0 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x238b2aa0 },
+	{ _MMIO(0x9888), 0x258b5551 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa2 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800420 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800421 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended_2_subslices_0x02[] = {
+	{ _MMIO(0x9888), 0x105c00e0 },
+	{ _MMIO(0x9888), 0x145b0160 },
+	{ _MMIO(0x9888), 0x165b2800 },
+	{ _MMIO(0x9888), 0x185b0120 },
+	{ _MMIO(0x9888), 0x0e5c25c1 },
+	{ _MMIO(0x9888), 0x005c6100 },
+	{ _MMIO(0x9888), 0x025c204c },
+	{ _MMIO(0x9888), 0x065c8000 },
+	{ _MMIO(0x9888), 0x085cc000 },
+	{ _MMIO(0x9888), 0x0a5cc000 },
+	{ _MMIO(0x9888), 0x0c5cc000 },
+	{ _MMIO(0x9888), 0x045cc000 },
+	{ _MMIO(0x9888), 0x005b0011 },
+	{ _MMIO(0x9888), 0x065b0900 },
+	{ _MMIO(0x9888), 0x085b0a13 },
+	{ _MMIO(0x9888), 0x0a5b0b15 },
+	{ _MMIO(0x9888), 0x0c5b2317 },
+	{ _MMIO(0x9888), 0x045b21b7 },
+	{ _MMIO(0x9888), 0x105b0000 },
+	{ _MMIO(0x9888), 0x0e5b0000 },
+	{ _MMIO(0x9888), 0x1a5b0000 },
+	{ _MMIO(0x9888), 0x0c1fa800 },
+	{ _MMIO(0x9888), 0x0e1faa2a },
+	{ _MMIO(0x9888), 0x101f02aa },
+	{ _MMIO(0x9888), 0x00384000 },
+	{ _MMIO(0x9888), 0x0e384000 },
+	{ _MMIO(0x9888), 0x16384000 },
+	{ _MMIO(0x9888), 0x18381555 },
+	{ _MMIO(0x9888), 0x02384000 },
+	{ _MMIO(0x9888), 0x04384000 },
+	{ _MMIO(0x9888), 0x06384000 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0039a000 },
+	{ _MMIO(0x9888), 0x06398000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0c39a000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8aaaa0 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x238b2aa0 },
+	{ _MMIO(0x9888), 0x258b5551 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa2 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800420 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800421 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended_4_subslices_0x04[] = {
+	{ _MMIO(0x9888), 0x103800e0 },
+	{ _MMIO(0x9888), 0x143a0160 },
+	{ _MMIO(0x9888), 0x163a2800 },
+	{ _MMIO(0x9888), 0x183a0120 },
+	{ _MMIO(0x9888), 0x0c1fa800 },
+	{ _MMIO(0x9888), 0x0e1faa2a },
+	{ _MMIO(0x9888), 0x101f02aa },
+	{ _MMIO(0x9888), 0x0e38a5c1 },
+	{ _MMIO(0x9888), 0x0038a100 },
+	{ _MMIO(0x9888), 0x0238204c },
+	{ _MMIO(0x9888), 0x16388000 },
+	{ _MMIO(0x9888), 0x183802aa },
+	{ _MMIO(0x9888), 0x04380000 },
+	{ _MMIO(0x9888), 0x06380000 },
+	{ _MMIO(0x9888), 0x08388000 },
+	{ _MMIO(0x9888), 0x0a388000 },
+	{ _MMIO(0x9888), 0x0039a000 },
+	{ _MMIO(0x9888), 0x06398000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0c39a000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x003a0011 },
+	{ _MMIO(0x9888), 0x063a0900 },
+	{ _MMIO(0x9888), 0x083a0a13 },
+	{ _MMIO(0x9888), 0x0a3a0b15 },
+	{ _MMIO(0x9888), 0x0c3a2317 },
+	{ _MMIO(0x9888), 0x043a21b7 },
+	{ _MMIO(0x9888), 0x103a0000 },
+	{ _MMIO(0x9888), 0x0e3a0000 },
+	{ _MMIO(0x9888), 0x1a3a0000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8aaaa0 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x238b2aa0 },
+	{ _MMIO(0x9888), 0x258b5551 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa2 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800420 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800421 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended_1_subslices_0x08[] = {
+	{ _MMIO(0x9888), 0x14bd0160 },
+	{ _MMIO(0x9888), 0x16bd2800 },
+	{ _MMIO(0x9888), 0x18bd0120 },
+	{ _MMIO(0x9888), 0x10d800e0 },
+	{ _MMIO(0x9888), 0x00dcc000 },
+	{ _MMIO(0x9888), 0x06dc8000 },
+	{ _MMIO(0x9888), 0x08dcc000 },
+	{ _MMIO(0x9888), 0x0adcc000 },
+	{ _MMIO(0x9888), 0x0cdcc000 },
+	{ _MMIO(0x9888), 0x0edcc000 },
+	{ _MMIO(0x9888), 0x02dcc000 },
+	{ _MMIO(0x9888), 0x04dcc000 },
+	{ _MMIO(0x9888), 0x00bd0011 },
+	{ _MMIO(0x9888), 0x06bd0900 },
+	{ _MMIO(0x9888), 0x08bd0a13 },
+	{ _MMIO(0x9888), 0x0abd0b15 },
+	{ _MMIO(0x9888), 0x0cbd2317 },
+	{ _MMIO(0x9888), 0x04bd21b7 },
+	{ _MMIO(0x9888), 0x10bd0000 },
+	{ _MMIO(0x9888), 0x0ebd0000 },
+	{ _MMIO(0x9888), 0x1abd0000 },
+	{ _MMIO(0x9888), 0x0ed825c1 },
+	{ _MMIO(0x9888), 0x00d86100 },
+	{ _MMIO(0x9888), 0x02d8204c },
+	{ _MMIO(0x9888), 0x06d88000 },
+	{ _MMIO(0x9888), 0x08d8c000 },
+	{ _MMIO(0x9888), 0x0ad8c000 },
+	{ _MMIO(0x9888), 0x0cd8c000 },
+	{ _MMIO(0x9888), 0x04d8c000 },
+	{ _MMIO(0x9888), 0x00db4000 },
+	{ _MMIO(0x9888), 0x0edb4000 },
+	{ _MMIO(0x9888), 0x18db5400 },
+	{ _MMIO(0x9888), 0x1adb0155 },
+	{ _MMIO(0x9888), 0x02db4000 },
+	{ _MMIO(0x9888), 0x04db4000 },
+	{ _MMIO(0x9888), 0x06db4000 },
+	{ _MMIO(0x9888), 0x08db4000 },
+	{ _MMIO(0x9888), 0x0adb4000 },
+	{ _MMIO(0x9888), 0x0c9fa800 },
+	{ _MMIO(0x9888), 0x0e9faa2a },
+	{ _MMIO(0x9888), 0x109f02aa },
+	{ _MMIO(0x9888), 0x00b84000 },
+	{ _MMIO(0x9888), 0x0eb84000 },
+	{ _MMIO(0x9888), 0x16b84000 },
+	{ _MMIO(0x9888), 0x18b81555 },
+	{ _MMIO(0x9888), 0x02b84000 },
+	{ _MMIO(0x9888), 0x04b84000 },
+	{ _MMIO(0x9888), 0x06b84000 },
+	{ _MMIO(0x9888), 0x08b84000 },
+	{ _MMIO(0x9888), 0x0ab84000 },
+	{ _MMIO(0x9888), 0x00b9a000 },
+	{ _MMIO(0x9888), 0x06b98000 },
+	{ _MMIO(0x9888), 0x08b9a000 },
+	{ _MMIO(0x9888), 0x0ab9a000 },
+	{ _MMIO(0x9888), 0x0cb9a000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x02b9a000 },
+	{ _MMIO(0x9888), 0x04b9a000 },
+	{ _MMIO(0x9888), 0x01888000 },
+	{ _MMIO(0x9888), 0x0d88f800 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x238b5540 },
+	{ _MMIO(0x9888), 0x258baaa2 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x018c4000 },
+	{ _MMIO(0x9888), 0x0f8c4000 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c5500 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x018da000 },
+	{ _MMIO(0x9888), 0x078d8000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa2 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800420 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800421 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended_3_subslices_0x10[] = {
+	{ _MMIO(0x9888), 0x10dc00e0 },
+	{ _MMIO(0x9888), 0x14db0160 },
+	{ _MMIO(0x9888), 0x16db2800 },
+	{ _MMIO(0x9888), 0x18db0120 },
+	{ _MMIO(0x9888), 0x0edc25c1 },
+	{ _MMIO(0x9888), 0x00dc6100 },
+	{ _MMIO(0x9888), 0x02dc204c },
+	{ _MMIO(0x9888), 0x06dc8000 },
+	{ _MMIO(0x9888), 0x08dcc000 },
+	{ _MMIO(0x9888), 0x0adcc000 },
+	{ _MMIO(0x9888), 0x0cdcc000 },
+	{ _MMIO(0x9888), 0x04dcc000 },
+	{ _MMIO(0x9888), 0x00db0011 },
+	{ _MMIO(0x9888), 0x06db0900 },
+	{ _MMIO(0x9888), 0x08db0a13 },
+	{ _MMIO(0x9888), 0x0adb0b15 },
+	{ _MMIO(0x9888), 0x0cdb2317 },
+	{ _MMIO(0x9888), 0x04db21b7 },
+	{ _MMIO(0x9888), 0x10db0000 },
+	{ _MMIO(0x9888), 0x0edb0000 },
+	{ _MMIO(0x9888), 0x1adb0000 },
+	{ _MMIO(0x9888), 0x0c9fa800 },
+	{ _MMIO(0x9888), 0x0e9faa2a },
+	{ _MMIO(0x9888), 0x109f02aa },
+	{ _MMIO(0x9888), 0x00b84000 },
+	{ _MMIO(0x9888), 0x0eb84000 },
+	{ _MMIO(0x9888), 0x16b84000 },
+	{ _MMIO(0x9888), 0x18b81555 },
+	{ _MMIO(0x9888), 0x02b84000 },
+	{ _MMIO(0x9888), 0x04b84000 },
+	{ _MMIO(0x9888), 0x06b84000 },
+	{ _MMIO(0x9888), 0x08b84000 },
+	{ _MMIO(0x9888), 0x0ab84000 },
+	{ _MMIO(0x9888), 0x00b9a000 },
+	{ _MMIO(0x9888), 0x06b98000 },
+	{ _MMIO(0x9888), 0x08b9a000 },
+	{ _MMIO(0x9888), 0x0ab9a000 },
+	{ _MMIO(0x9888), 0x0cb9a000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x02b9a000 },
+	{ _MMIO(0x9888), 0x04b9a000 },
+	{ _MMIO(0x9888), 0x01888000 },
+	{ _MMIO(0x9888), 0x0d88f800 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x238b5540 },
+	{ _MMIO(0x9888), 0x258baaa2 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x018c4000 },
+	{ _MMIO(0x9888), 0x0f8c4000 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c5500 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x018da000 },
+	{ _MMIO(0x9888), 0x078d8000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa2 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800420 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800421 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended_5_subslices_0x20[] = {
+	{ _MMIO(0x9888), 0x10b800e0 },
+	{ _MMIO(0x9888), 0x14ba0160 },
+	{ _MMIO(0x9888), 0x16ba2800 },
+	{ _MMIO(0x9888), 0x18ba0120 },
+	{ _MMIO(0x9888), 0x0c9fa800 },
+	{ _MMIO(0x9888), 0x0e9faa2a },
+	{ _MMIO(0x9888), 0x109f02aa },
+	{ _MMIO(0x9888), 0x0eb8a5c1 },
+	{ _MMIO(0x9888), 0x00b8a100 },
+	{ _MMIO(0x9888), 0x02b8204c },
+	{ _MMIO(0x9888), 0x16b88000 },
+	{ _MMIO(0x9888), 0x18b802aa },
+	{ _MMIO(0x9888), 0x04b80000 },
+	{ _MMIO(0x9888), 0x06b80000 },
+	{ _MMIO(0x9888), 0x08b88000 },
+	{ _MMIO(0x9888), 0x0ab88000 },
+	{ _MMIO(0x9888), 0x00b9a000 },
+	{ _MMIO(0x9888), 0x06b98000 },
+	{ _MMIO(0x9888), 0x08b9a000 },
+	{ _MMIO(0x9888), 0x0ab9a000 },
+	{ _MMIO(0x9888), 0x0cb9a000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x02b9a000 },
+	{ _MMIO(0x9888), 0x04b9a000 },
+	{ _MMIO(0x9888), 0x00ba0011 },
+	{ _MMIO(0x9888), 0x06ba0900 },
+	{ _MMIO(0x9888), 0x08ba0a13 },
+	{ _MMIO(0x9888), 0x0aba0b15 },
+	{ _MMIO(0x9888), 0x0cba2317 },
+	{ _MMIO(0x9888), 0x04ba21b7 },
+	{ _MMIO(0x9888), 0x10ba0000 },
+	{ _MMIO(0x9888), 0x0eba0000 },
+	{ _MMIO(0x9888), 0x1aba0000 },
+	{ _MMIO(0x9888), 0x01888000 },
+	{ _MMIO(0x9888), 0x0d88f800 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x238b5540 },
+	{ _MMIO(0x9888), 0x258baaa2 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x018c4000 },
+	{ _MMIO(0x9888), 0x0f8c4000 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c5500 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x018da000 },
+	{ _MMIO(0x9888), 0x078d8000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa2 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800420 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800421 },
+	{ _MMIO(0x9888), 0x41800000 },
+};
+
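+/*
+ * compute_extended has one mux (NOA routing) table per subslice; select
+ * the variants for whichever subslices the fuse mask leaves enabled.
+ */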
+static int
+get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
+				const struct i915_oa_reg **regs,
+				int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 6);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 6);
+
+	if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
+		regs[n] = mux_config_compute_extended_0_subslices_0x01;
+		lens[n] = ARRAY_SIZE(mux_config_compute_extended_0_subslices_0x01);
+		n++;
+	}
+	if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x08) {
+		regs[n] = mux_config_compute_extended_1_subslices_0x08;
+		lens[n] = ARRAY_SIZE(mux_config_compute_extended_1_subslices_0x08);
+		n++;
+	}
+	if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x02) {
+		regs[n] = mux_config_compute_extended_2_subslices_0x02;
+		lens[n] = ARRAY_SIZE(mux_config_compute_extended_2_subslices_0x02);
+		n++;
+	}
+	if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x10) {
+		regs[n] = mux_config_compute_extended_3_subslices_0x10;
+		lens[n] = ARRAY_SIZE(mux_config_compute_extended_3_subslices_0x10);
+		n++;
+	}
+	if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x04) {
+		regs[n] = mux_config_compute_extended_4_subslices_0x04;
+		lens[n] = ARRAY_SIZE(mux_config_compute_extended_4_subslices_0x04);
+		n++;
+	}
+	if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x20) {
+		regs[n] = mux_config_compute_extended_5_subslices_0x20;
+		lens[n] = ARRAY_SIZE(mux_config_compute_extended_5_subslices_0x20);
+		n++;
+	}
+
+	return n;
+}
+
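+/*
+ * compute_l3_cache metric set: boolean counter, flexible-EU counter and
+ * mux register lists; the mux table is not subslice-dependent, so the
+ * getter below always returns this single variant.
+ */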
+static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x30800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007fffa },
+	{ _MMIO(0x2774), 0x0000fefe },
+	{ _MMIO(0x2778), 0x0007fffa },
+	{ _MMIO(0x277c), 0x0000fefd },
+	{ _MMIO(0x2790), 0x0007fffa },
+	{ _MMIO(0x2794), 0x0000fbef },
+	{ _MMIO(0x2798), 0x0007fffa },
+	{ _MMIO(0x279c), 0x0000fbdf },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00101100 },
+	{ _MMIO(0xe45c), 0x00201200 },
+	{ _MMIO(0xe55c), 0x00301300 },
+	{ _MMIO(0xe65c), 0x00401400 },
+};
+
+static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
+	{ _MMIO(0x9888), 0x143f00b3 },
+	{ _MMIO(0x9888), 0x14bf00b3 },
+	{ _MMIO(0x9888), 0x138303c0 },
+	{ _MMIO(0x9888), 0x3b800060 },
+	{ _MMIO(0x9888), 0x3d800805 },
+	{ _MMIO(0x9888), 0x003f0029 },
+	{ _MMIO(0x9888), 0x063f1400 },
+	{ _MMIO(0x9888), 0x083f1225 },
+	{ _MMIO(0x9888), 0x0e3f1327 },
+	{ _MMIO(0x9888), 0x103f0000 },
+	{ _MMIO(0x9888), 0x005a4000 },
+	{ _MMIO(0x9888), 0x065a8000 },
+	{ _MMIO(0x9888), 0x085ac000 },
+	{ _MMIO(0x9888), 0x0e5ac000 },
+	{ _MMIO(0x9888), 0x001d4000 },
+	{ _MMIO(0x9888), 0x061d8000 },
+	{ _MMIO(0x9888), 0x081dc000 },
+	{ _MMIO(0x9888), 0x0e1dc000 },
+	{ _MMIO(0x9888), 0x0c1f0800 },
+	{ _MMIO(0x9888), 0x0e1f2a00 },
+	{ _MMIO(0x9888), 0x101f0280 },
+	{ _MMIO(0x9888), 0x00391000 },
+	{ _MMIO(0x9888), 0x06394000 },
+	{ _MMIO(0x9888), 0x08395000 },
+	{ _MMIO(0x9888), 0x0e395000 },
+	{ _MMIO(0x9888), 0x0abf1429 },
+	{ _MMIO(0x9888), 0x0cbf1225 },
+	{ _MMIO(0x9888), 0x00bf1380 },
+	{ _MMIO(0x9888), 0x02bf0026 },
+	{ _MMIO(0x9888), 0x10bf0000 },
+	{ _MMIO(0x9888), 0x0adac000 },
+	{ _MMIO(0x9888), 0x0cdac000 },
+	{ _MMIO(0x9888), 0x00da8000 },
+	{ _MMIO(0x9888), 0x02da4000 },
+	{ _MMIO(0x9888), 0x0a9dc000 },
+	{ _MMIO(0x9888), 0x0c9dc000 },
+	{ _MMIO(0x9888), 0x009d8000 },
+	{ _MMIO(0x9888), 0x029d4000 },
+	{ _MMIO(0x9888), 0x0e9f8000 },
+	{ _MMIO(0x9888), 0x109f002a },
+	{ _MMIO(0x9888), 0x0c9fa000 },
+	{ _MMIO(0x9888), 0x0ab95000 },
+	{ _MMIO(0x9888), 0x0cb95000 },
+	{ _MMIO(0x9888), 0x00b94000 },
+	{ _MMIO(0x9888), 0x02b91000 },
+	{ _MMIO(0x9888), 0x0d88c000 },
+	{ _MMIO(0x9888), 0x0f880003 },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8a8020 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x238b0520 },
+	{ _MMIO(0x9888), 0x258ba950 },
+	{ _MMIO(0x9888), 0x278b0016 },
+	{ _MMIO(0x9888), 0x198c5400 },
+	{ _MMIO(0x9888), 0x1b8c0001 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x018d8000 },
+	{ _MMIO(0x9888), 0x038d2000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaa0 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x03835180 },
+	{ _MMIO(0x9888), 0x05834022 },
+	{ _MMIO(0x9888), 0x11830000 },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x07830000 },
+	{ _MMIO(0x9888), 0x09830000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x05844000 },
+	{ _MMIO(0x9888), 0x1b80c137 },
+	{ _MMIO(0x9888), 0x1d80c147 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x15804000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d801000 },
+	{ _MMIO(0x9888), 0x4f800111 },
+	{ _MMIO(0x9888), 0x43800842 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800840 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800800 },
+	{ _MMIO(0x9888), 0x418014a2 },
+};
+
+static int
+get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
+				const struct i915_oa_reg **regs,
+				int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_l3_cache;
+	lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_data_port_reads_coalescing[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x274c), 0xba98ba98 },
+	{ _MMIO(0x2748), 0xba98ba98 },
+	{ _MMIO(0x2744), 0x00003377 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007fff2 },
+	{ _MMIO(0x2774), 0x00007ff0 },
+	{ _MMIO(0x2778), 0x0007ffe2 },
+	{ _MMIO(0x277c), 0x00007ff0 },
+	{ _MMIO(0x2780), 0x0007ffc2 },
+	{ _MMIO(0x2784), 0x00007ff0 },
+	{ _MMIO(0x2788), 0x0007ff82 },
+	{ _MMIO(0x278c), 0x00007ff0 },
+	{ _MMIO(0x2790), 0x0007fffa },
+	{ _MMIO(0x2794), 0x0000bfef },
+	{ _MMIO(0x2798), 0x0007fffa },
+	{ _MMIO(0x279c), 0x0000bfdf },
+	{ _MMIO(0x27a0), 0x0007fffa },
+	{ _MMIO(0x27a4), 0x0000bfbf },
+	{ _MMIO(0x27a8), 0x0007fffa },
+	{ _MMIO(0x27ac), 0x0000bf7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_data_port_reads_coalescing[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_data_port_reads_coalescing_0_subslices_0x01[] = {
+	{ _MMIO(0x9888), 0x103d0005 },
+	{ _MMIO(0x9888), 0x163d240b },
+	{ _MMIO(0x9888), 0x1058022f },
+	{ _MMIO(0x9888), 0x185b5520 },
+	{ _MMIO(0x9888), 0x198b0003 },
+	{ _MMIO(0x9888), 0x005cc000 },
+	{ _MMIO(0x9888), 0x065cc000 },
+	{ _MMIO(0x9888), 0x085cc000 },
+	{ _MMIO(0x9888), 0x0a5cc000 },
+	{ _MMIO(0x9888), 0x0c5cc000 },
+	{ _MMIO(0x9888), 0x0e5cc000 },
+	{ _MMIO(0x9888), 0x025c4000 },
+	{ _MMIO(0x9888), 0x045c8000 },
+	{ _MMIO(0x9888), 0x003d0000 },
+	{ _MMIO(0x9888), 0x063d00b0 },
+	{ _MMIO(0x9888), 0x083d0182 },
+	{ _MMIO(0x9888), 0x0a3d10a0 },
+	{ _MMIO(0x9888), 0x0c3d11a2 },
+	{ _MMIO(0x9888), 0x0e3d0000 },
+	{ _MMIO(0x9888), 0x183d0000 },
+	{ _MMIO(0x9888), 0x1a3d0000 },
+	{ _MMIO(0x9888), 0x0e582242 },
+	{ _MMIO(0x9888), 0x00586700 },
+	{ _MMIO(0x9888), 0x0258004f },
+	{ _MMIO(0x9888), 0x0658c000 },
+	{ _MMIO(0x9888), 0x0858c000 },
+	{ _MMIO(0x9888), 0x0a58c000 },
+	{ _MMIO(0x9888), 0x0c58c000 },
+	{ _MMIO(0x9888), 0x045b6300 },
+	{ _MMIO(0x9888), 0x105b0000 },
+	{ _MMIO(0x9888), 0x005b4000 },
+	{ _MMIO(0x9888), 0x0e5b4000 },
+	{ _MMIO(0x9888), 0x1a5b0155 },
+	{ _MMIO(0x9888), 0x025b4000 },
+	{ _MMIO(0x9888), 0x0a5b0000 },
+	{ _MMIO(0x9888), 0x0c5b4000 },
+	{ _MMIO(0x9888), 0x0c1fa800 },
+	{ _MMIO(0x9888), 0x0e1faaa0 },
+	{ _MMIO(0x9888), 0x101f02aa },
+	{ _MMIO(0x9888), 0x00384000 },
+	{ _MMIO(0x9888), 0x0e384000 },
+	{ _MMIO(0x9888), 0x16384000 },
+	{ _MMIO(0x9888), 0x18381555 },
+	{ _MMIO(0x9888), 0x02384000 },
+	{ _MMIO(0x9888), 0x04384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0c384000 },
+	{ _MMIO(0x9888), 0x0039a000 },
+	{ _MMIO(0x9888), 0x0639a000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0c39a000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x02392000 },
+	{ _MMIO(0x9888), 0x04398000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8aaaa0 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x038b6300 },
+	{ _MMIO(0x9888), 0x058b0062 },
+	{ _MMIO(0x9888), 0x118b0000 },
+	{ _MMIO(0x9888), 0x238b02a0 },
+	{ _MMIO(0x9888), 0x258b5555 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d801000 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f800001 },
+	{ _MMIO(0x9888), 0x43800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800420 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800421 },
+	{ _MMIO(0x9888), 0x41800041 },
+};
+
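+/*
+ * Only a subslice-0 mux table exists for the data port coalescing sets,
+ * so no config is returned on parts with subslice 0 fused off.
+ */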
+static int
+get_data_port_reads_coalescing_mux_config(struct drm_i915_private *dev_priv,
+					  const struct i915_oa_reg **regs,
+					  int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
+		regs[n] = mux_config_data_port_reads_coalescing_0_subslices_0x01;
+		lens[n] = ARRAY_SIZE(mux_config_data_port_reads_coalescing_0_subslices_0x01);
+		n++;
+	}
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_data_port_writes_coalescing[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x274c), 0xba98ba98 },
+	{ _MMIO(0x2748), 0xba98ba98 },
+	{ _MMIO(0x2744), 0x00003377 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007ff72 },
+	{ _MMIO(0x2774), 0x0000bfd0 },
+	{ _MMIO(0x2778), 0x0007ff62 },
+	{ _MMIO(0x277c), 0x0000bfd0 },
+	{ _MMIO(0x2780), 0x0007ff42 },
+	{ _MMIO(0x2784), 0x0000bfd0 },
+	{ _MMIO(0x2788), 0x0007ff02 },
+	{ _MMIO(0x278c), 0x0000bfd0 },
+	{ _MMIO(0x2790), 0x0005fff2 },
+	{ _MMIO(0x2794), 0x0000bfd0 },
+	{ _MMIO(0x2798), 0x0005ffe2 },
+	{ _MMIO(0x279c), 0x0000bfd0 },
+	{ _MMIO(0x27a0), 0x0005ffc2 },
+	{ _MMIO(0x27a4), 0x0000bfd0 },
+	{ _MMIO(0x27a8), 0x0005ff82 },
+	{ _MMIO(0x27ac), 0x0000bfd0 },
+};
+
+static const struct i915_oa_reg flex_eu_config_data_port_writes_coalescing[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_data_port_writes_coalescing_0_subslices_0x01[] = {
+	{ _MMIO(0x9888), 0x103d0005 },
+	{ _MMIO(0x9888), 0x143d0120 },
+	{ _MMIO(0x9888), 0x163d2400 },
+	{ _MMIO(0x9888), 0x1058022f },
+	{ _MMIO(0x9888), 0x105b0000 },
+	{ _MMIO(0x9888), 0x198b0003 },
+	{ _MMIO(0x9888), 0x005cc000 },
+	{ _MMIO(0x9888), 0x065cc000 },
+	{ _MMIO(0x9888), 0x085cc000 },
+	{ _MMIO(0x9888), 0x0a5cc000 },
+	{ _MMIO(0x9888), 0x0e5cc000 },
+	{ _MMIO(0x9888), 0x025c4000 },
+	{ _MMIO(0x9888), 0x045c8000 },
+	{ _MMIO(0x9888), 0x003d0000 },
+	{ _MMIO(0x9888), 0x063d0094 },
+	{ _MMIO(0x9888), 0x083d0182 },
+	{ _MMIO(0x9888), 0x0a3d1814 },
+	{ _MMIO(0x9888), 0x0e3d0000 },
+	{ _MMIO(0x9888), 0x183d0000 },
+	{ _MMIO(0x9888), 0x1a3d0000 },
+	{ _MMIO(0x9888), 0x0c3d0000 },
+	{ _MMIO(0x9888), 0x0e582242 },
+	{ _MMIO(0x9888), 0x00586700 },
+	{ _MMIO(0x9888), 0x0258004f },
+	{ _MMIO(0x9888), 0x0658c000 },
+	{ _MMIO(0x9888), 0x0858c000 },
+	{ _MMIO(0x9888), 0x0a58c000 },
+	{ _MMIO(0x9888), 0x045b6a80 },
+	{ _MMIO(0x9888), 0x005b4000 },
+	{ _MMIO(0x9888), 0x0e5b4000 },
+	{ _MMIO(0x9888), 0x185b5400 },
+	{ _MMIO(0x9888), 0x1a5b0141 },
+	{ _MMIO(0x9888), 0x025b4000 },
+	{ _MMIO(0x9888), 0x0a5b0000 },
+	{ _MMIO(0x9888), 0x0c5b4000 },
+	{ _MMIO(0x9888), 0x0c1fa800 },
+	{ _MMIO(0x9888), 0x0e1faaa0 },
+	{ _MMIO(0x9888), 0x101f0282 },
+	{ _MMIO(0x9888), 0x00384000 },
+	{ _MMIO(0x9888), 0x0e384000 },
+	{ _MMIO(0x9888), 0x16384000 },
+	{ _MMIO(0x9888), 0x18381415 },
+	{ _MMIO(0x9888), 0x02384000 },
+	{ _MMIO(0x9888), 0x04384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0c384000 },
+	{ _MMIO(0x9888), 0x0039a000 },
+	{ _MMIO(0x9888), 0x0639a000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x02392000 },
+	{ _MMIO(0x9888), 0x04398000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8a82a0 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x038b6300 },
+	{ _MMIO(0x9888), 0x058b0062 },
+	{ _MMIO(0x9888), 0x118b0000 },
+	{ _MMIO(0x9888), 0x238b02a0 },
+	{ _MMIO(0x9888), 0x258b1555 },
+	{ _MMIO(0x9888), 0x278b0014 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x21852aaa },
+	{ _MMIO(0x9888), 0x23850028 },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830141 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0xd24), 0x00000000 },
+	{ _MMIO(0x9888), 0x4d801000 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f800001 },
+	{ _MMIO(0x9888), 0x43800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800420 },
+	{ _MMIO(0x9888), 0x3f800421 },
+	{ _MMIO(0x9888), 0x41800041 },
+};
+
+static int
+get_data_port_writes_coalescing_mux_config(struct drm_i915_private *dev_priv,
+					   const struct i915_oa_reg **regs,
+					   int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
+		regs[n] = mux_config_data_port_writes_coalescing_0_subslices_0x01;
+		lens[n] = ARRAY_SIZE(mux_config_data_port_writes_coalescing_0_subslices_0x01);
+		n++;
+	}
+
+	return n;
+}
+
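+/*
+ * The sets that follow (hdc_and_sf, l3_1-l3_4, rasterizer_and_pixel_backend,
+ * sampler_1/2) are likewise not subslice-dependent: each getter returns its
+ * single fixed mux table.
+ */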
+static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x10800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000fff7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
+	{ _MMIO(0x9888), 0x105c0232 },
+	{ _MMIO(0x9888), 0x10580232 },
+	{ _MMIO(0x9888), 0x10380232 },
+	{ _MMIO(0x9888), 0x10dc0232 },
+	{ _MMIO(0x9888), 0x10d80232 },
+	{ _MMIO(0x9888), 0x10b80232 },
+	{ _MMIO(0x9888), 0x118e4400 },
+	{ _MMIO(0x9888), 0x025c6080 },
+	{ _MMIO(0x9888), 0x045c004b },
+	{ _MMIO(0x9888), 0x005c8000 },
+	{ _MMIO(0x9888), 0x00582080 },
+	{ _MMIO(0x9888), 0x0258004b },
+	{ _MMIO(0x9888), 0x025b4000 },
+	{ _MMIO(0x9888), 0x045b4000 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f00aa },
+	{ _MMIO(0x9888), 0x04386080 },
+	{ _MMIO(0x9888), 0x0638404b },
+	{ _MMIO(0x9888), 0x02384000 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a380000 },
+	{ _MMIO(0x9888), 0x0c380000 },
+	{ _MMIO(0x9888), 0x00398000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x0cdc25c1 },
+	{ _MMIO(0x9888), 0x0adcc000 },
+	{ _MMIO(0x9888), 0x0ad825c1 },
+	{ _MMIO(0x9888), 0x18db4000 },
+	{ _MMIO(0x9888), 0x1adb0001 },
+	{ _MMIO(0x9888), 0x0e9f8000 },
+	{ _MMIO(0x9888), 0x109f02aa },
+	{ _MMIO(0x9888), 0x0eb825c1 },
+	{ _MMIO(0x9888), 0x18b80154 },
+	{ _MMIO(0x9888), 0x0ab9a000 },
+	{ _MMIO(0x9888), 0x0cb9a000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x0d88c000 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x258baa05 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x198c5400 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x098dc000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x098e05c0 },
+	{ _MMIO(0x9888), 0x058e0000 },
+	{ _MMIO(0x9888), 0x198f0020 },
+	{ _MMIO(0x9888), 0x2185aa0a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x19835000 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x09848000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x19808000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x51800040 },
+	{ _MMIO(0x9888), 0x43800400 },
+	{ _MMIO(0x9888), 0x45800800 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800c62 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f801042 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x418014a4 },
+};
+
+static int
+get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
+			  const struct i915_oa_reg **regs,
+			  int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_hdc_and_sf;
+	lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_1[] = {
+	{ _MMIO(0x9888), 0x10bf03da },
+	{ _MMIO(0x9888), 0x14bf0001 },
+	{ _MMIO(0x9888), 0x12980340 },
+	{ _MMIO(0x9888), 0x12990340 },
+	{ _MMIO(0x9888), 0x0cbf1187 },
+	{ _MMIO(0x9888), 0x0ebf1205 },
+	{ _MMIO(0x9888), 0x00bf0500 },
+	{ _MMIO(0x9888), 0x02bf042b },
+	{ _MMIO(0x9888), 0x04bf002c },
+	{ _MMIO(0x9888), 0x0cdac000 },
+	{ _MMIO(0x9888), 0x0edac000 },
+	{ _MMIO(0x9888), 0x00da8000 },
+	{ _MMIO(0x9888), 0x02dac000 },
+	{ _MMIO(0x9888), 0x04da4000 },
+	{ _MMIO(0x9888), 0x04983400 },
+	{ _MMIO(0x9888), 0x10980000 },
+	{ _MMIO(0x9888), 0x06990034 },
+	{ _MMIO(0x9888), 0x10990000 },
+	{ _MMIO(0x9888), 0x0c9dc000 },
+	{ _MMIO(0x9888), 0x0e9dc000 },
+	{ _MMIO(0x9888), 0x009d8000 },
+	{ _MMIO(0x9888), 0x029dc000 },
+	{ _MMIO(0x9888), 0x049d4000 },
+	{ _MMIO(0x9888), 0x109f02a8 },
+	{ _MMIO(0x9888), 0x0c9fa000 },
+	{ _MMIO(0x9888), 0x0e9f00ba },
+	{ _MMIO(0x9888), 0x0cb88000 },
+	{ _MMIO(0x9888), 0x0cb95000 },
+	{ _MMIO(0x9888), 0x0eb95000 },
+	{ _MMIO(0x9888), 0x00b94000 },
+	{ _MMIO(0x9888), 0x02b95000 },
+	{ _MMIO(0x9888), 0x04b91000 },
+	{ _MMIO(0x9888), 0x06b92000 },
+	{ _MMIO(0x9888), 0x0cba4000 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x258b800a },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b5500 },
+	{ _MMIO(0x9888), 0x198c4000 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x018d8000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2185800a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x1b830154 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x47800000 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800060 },
+};
+
+static int
+get_l3_1_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_1;
+	lens[n] = ARRAY_SIZE(mux_config_l3_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_2[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_2[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_2[] = {
+	{ _MMIO(0x9888), 0x103f03da },
+	{ _MMIO(0x9888), 0x143f0001 },
+	{ _MMIO(0x9888), 0x12180340 },
+	{ _MMIO(0x9888), 0x12190340 },
+	{ _MMIO(0x9888), 0x0c3f1187 },
+	{ _MMIO(0x9888), 0x0e3f1205 },
+	{ _MMIO(0x9888), 0x003f0500 },
+	{ _MMIO(0x9888), 0x023f042b },
+	{ _MMIO(0x9888), 0x043f002c },
+	{ _MMIO(0x9888), 0x0c5ac000 },
+	{ _MMIO(0x9888), 0x0e5ac000 },
+	{ _MMIO(0x9888), 0x005a8000 },
+	{ _MMIO(0x9888), 0x025ac000 },
+	{ _MMIO(0x9888), 0x045a4000 },
+	{ _MMIO(0x9888), 0x04183400 },
+	{ _MMIO(0x9888), 0x10180000 },
+	{ _MMIO(0x9888), 0x06190034 },
+	{ _MMIO(0x9888), 0x10190000 },
+	{ _MMIO(0x9888), 0x0c1dc000 },
+	{ _MMIO(0x9888), 0x0e1dc000 },
+	{ _MMIO(0x9888), 0x001d8000 },
+	{ _MMIO(0x9888), 0x021dc000 },
+	{ _MMIO(0x9888), 0x041d4000 },
+	{ _MMIO(0x9888), 0x101f02a8 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f00ba },
+	{ _MMIO(0x9888), 0x0c388000 },
+	{ _MMIO(0x9888), 0x0c395000 },
+	{ _MMIO(0x9888), 0x0e395000 },
+	{ _MMIO(0x9888), 0x00394000 },
+	{ _MMIO(0x9888), 0x02395000 },
+	{ _MMIO(0x9888), 0x04391000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x0c3a4000 },
+	{ _MMIO(0x9888), 0x1b8aa800 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x258b4005 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x2185800a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x1b830154 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x47800000 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800060 },
+};
+
+static int
+get_l3_2_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_2;
+	lens[n] = ARRAY_SIZE(mux_config_l3_2);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_3[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_3[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_3[] = {
+	{ _MMIO(0x9888), 0x121b0340 },
+	{ _MMIO(0x9888), 0x103f0274 },
+	{ _MMIO(0x9888), 0x123f0000 },
+	{ _MMIO(0x9888), 0x129b0340 },
+	{ _MMIO(0x9888), 0x10bf0274 },
+	{ _MMIO(0x9888), 0x12bf0000 },
+	{ _MMIO(0x9888), 0x041b3400 },
+	{ _MMIO(0x9888), 0x101b0000 },
+	{ _MMIO(0x9888), 0x045c8000 },
+	{ _MMIO(0x9888), 0x0a3d4000 },
+	{ _MMIO(0x9888), 0x003f0080 },
+	{ _MMIO(0x9888), 0x023f0793 },
+	{ _MMIO(0x9888), 0x043f0014 },
+	{ _MMIO(0x9888), 0x04588000 },
+	{ _MMIO(0x9888), 0x005a8000 },
+	{ _MMIO(0x9888), 0x025ac000 },
+	{ _MMIO(0x9888), 0x045a4000 },
+	{ _MMIO(0x9888), 0x0a5b4000 },
+	{ _MMIO(0x9888), 0x001d8000 },
+	{ _MMIO(0x9888), 0x021dc000 },
+	{ _MMIO(0x9888), 0x041d4000 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f002a },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x00394000 },
+	{ _MMIO(0x9888), 0x02395000 },
+	{ _MMIO(0x9888), 0x04399000 },
+	{ _MMIO(0x9888), 0x069b0034 },
+	{ _MMIO(0x9888), 0x109b0000 },
+	{ _MMIO(0x9888), 0x06dc4000 },
+	{ _MMIO(0x9888), 0x0cbd4000 },
+	{ _MMIO(0x9888), 0x0cbf0981 },
+	{ _MMIO(0x9888), 0x0ebf0a0f },
+	{ _MMIO(0x9888), 0x06d84000 },
+	{ _MMIO(0x9888), 0x0cdac000 },
+	{ _MMIO(0x9888), 0x0edac000 },
+	{ _MMIO(0x9888), 0x0cdb4000 },
+	{ _MMIO(0x9888), 0x0c9dc000 },
+	{ _MMIO(0x9888), 0x0e9dc000 },
+	{ _MMIO(0x9888), 0x109f02a8 },
+	{ _MMIO(0x9888), 0x0e9f0080 },
+	{ _MMIO(0x9888), 0x0cb84000 },
+	{ _MMIO(0x9888), 0x0cb95000 },
+	{ _MMIO(0x9888), 0x0eb95000 },
+	{ _MMIO(0x9888), 0x06b92000 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x258b8009 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x198c4000 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2185800a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x1b830154 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x45800c00 },
+	{ _MMIO(0x9888), 0x47800c63 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f8014a5 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800045 },
+};
+
+static int
+get_l3_3_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_3;
+	lens[n] = ARRAY_SIZE(mux_config_l3_3);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_4[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_4[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_4[] = {
+	{ _MMIO(0x9888), 0x121a0340 },
+	{ _MMIO(0x9888), 0x103f0017 },
+	{ _MMIO(0x9888), 0x123f0020 },
+	{ _MMIO(0x9888), 0x129a0340 },
+	{ _MMIO(0x9888), 0x10bf0017 },
+	{ _MMIO(0x9888), 0x12bf0020 },
+	{ _MMIO(0x9888), 0x041a3400 },
+	{ _MMIO(0x9888), 0x101a0000 },
+	{ _MMIO(0x9888), 0x043b8000 },
+	{ _MMIO(0x9888), 0x0a3e0010 },
+	{ _MMIO(0x9888), 0x003f0200 },
+	{ _MMIO(0x9888), 0x023f0113 },
+	{ _MMIO(0x9888), 0x043f0014 },
+	{ _MMIO(0x9888), 0x02592000 },
+	{ _MMIO(0x9888), 0x005a8000 },
+	{ _MMIO(0x9888), 0x025ac000 },
+	{ _MMIO(0x9888), 0x045a4000 },
+	{ _MMIO(0x9888), 0x0a1c8000 },
+	{ _MMIO(0x9888), 0x001d8000 },
+	{ _MMIO(0x9888), 0x021dc000 },
+	{ _MMIO(0x9888), 0x041d4000 },
+	{ _MMIO(0x9888), 0x0a1e8000 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f001a },
+	{ _MMIO(0x9888), 0x00394000 },
+	{ _MMIO(0x9888), 0x02395000 },
+	{ _MMIO(0x9888), 0x04391000 },
+	{ _MMIO(0x9888), 0x069a0034 },
+	{ _MMIO(0x9888), 0x109a0000 },
+	{ _MMIO(0x9888), 0x06bb4000 },
+	{ _MMIO(0x9888), 0x0abe0040 },
+	{ _MMIO(0x9888), 0x0cbf0984 },
+	{ _MMIO(0x9888), 0x0ebf0a02 },
+	{ _MMIO(0x9888), 0x02d94000 },
+	{ _MMIO(0x9888), 0x0cdac000 },
+	{ _MMIO(0x9888), 0x0edac000 },
+	{ _MMIO(0x9888), 0x0c9c0400 },
+	{ _MMIO(0x9888), 0x0c9dc000 },
+	{ _MMIO(0x9888), 0x0e9dc000 },
+	{ _MMIO(0x9888), 0x0c9e0400 },
+	{ _MMIO(0x9888), 0x109f02a8 },
+	{ _MMIO(0x9888), 0x0e9f0040 },
+	{ _MMIO(0x9888), 0x0cb95000 },
+	{ _MMIO(0x9888), 0x0eb95000 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x258b8009 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x198c4000 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2185800a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x1b830154 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x45800800 },
+	{ _MMIO(0x9888), 0x47800842 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f801084 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800044 },
+};
+
+static int
+get_l3_4_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_4;
+	lens[n] = ARRAY_SIZE(mux_config_l3_4);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00006000 },
+	{ _MMIO(0x2774), 0x0000f3ff },
+	{ _MMIO(0x2778), 0x00001800 },
+	{ _MMIO(0x277c), 0x0000fcff },
+	{ _MMIO(0x2780), 0x00000600 },
+	{ _MMIO(0x2784), 0x0000ff3f },
+	{ _MMIO(0x2788), 0x00000180 },
+	{ _MMIO(0x278c), 0x0000ffcf },
+	{ _MMIO(0x2790), 0x00000060 },
+	{ _MMIO(0x2794), 0x0000fff3 },
+	{ _MMIO(0x2798), 0x00000018 },
+	{ _MMIO(0x279c), 0x0000fffc },
+};
+
+static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0x9888), 0x143b000e },
+	{ _MMIO(0x9888), 0x043c55c0 },
+	{ _MMIO(0x9888), 0x0a1e0280 },
+	{ _MMIO(0x9888), 0x0c1e0408 },
+	{ _MMIO(0x9888), 0x10390000 },
+	{ _MMIO(0x9888), 0x12397a1f },
+	{ _MMIO(0x9888), 0x14bb000e },
+	{ _MMIO(0x9888), 0x04bc5000 },
+	{ _MMIO(0x9888), 0x0a9e0296 },
+	{ _MMIO(0x9888), 0x0c9e0008 },
+	{ _MMIO(0x9888), 0x10b90000 },
+	{ _MMIO(0x9888), 0x12b97a1f },
+	{ _MMIO(0x9888), 0x063b0042 },
+	{ _MMIO(0x9888), 0x103b0000 },
+	{ _MMIO(0x9888), 0x083c0000 },
+	{ _MMIO(0x9888), 0x0a3e0040 },
+	{ _MMIO(0x9888), 0x043f8000 },
+	{ _MMIO(0x9888), 0x02594000 },
+	{ _MMIO(0x9888), 0x045a8000 },
+	{ _MMIO(0x9888), 0x0c1c0400 },
+	{ _MMIO(0x9888), 0x041d8000 },
+	{ _MMIO(0x9888), 0x081e02c0 },
+	{ _MMIO(0x9888), 0x0e1e0000 },
+	{ _MMIO(0x9888), 0x0c1fa800 },
+	{ _MMIO(0x9888), 0x0e1f0260 },
+	{ _MMIO(0x9888), 0x101f0014 },
+	{ _MMIO(0x9888), 0x003905e0 },
+	{ _MMIO(0x9888), 0x06390bc0 },
+	{ _MMIO(0x9888), 0x02390018 },
+	{ _MMIO(0x9888), 0x04394000 },
+	{ _MMIO(0x9888), 0x04bb0042 },
+	{ _MMIO(0x9888), 0x10bb0000 },
+	{ _MMIO(0x9888), 0x02bc05c0 },
+	{ _MMIO(0x9888), 0x08bc0000 },
+	{ _MMIO(0x9888), 0x0abe0004 },
+	{ _MMIO(0x9888), 0x02bf8000 },
+	{ _MMIO(0x9888), 0x02d91000 },
+	{ _MMIO(0x9888), 0x02da8000 },
+	{ _MMIO(0x9888), 0x089c8000 },
+	{ _MMIO(0x9888), 0x029d8000 },
+	{ _MMIO(0x9888), 0x089e8000 },
+	{ _MMIO(0x9888), 0x0e9e0000 },
+	{ _MMIO(0x9888), 0x0e9fa806 },
+	{ _MMIO(0x9888), 0x109f0142 },
+	{ _MMIO(0x9888), 0x08b90617 },
+	{ _MMIO(0x9888), 0x0ab90be0 },
+	{ _MMIO(0x9888), 0x02b94000 },
+	{ _MMIO(0x9888), 0x0d88f000 },
+	{ _MMIO(0x9888), 0x0f88000c },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x1b8a2800 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x238b52a0 },
+	{ _MMIO(0x9888), 0x258b6a95 },
+	{ _MMIO(0x9888), 0x278b0029 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c1500 },
+	{ _MMIO(0x9888), 0x1b8c0014 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x038d8000 },
+	{ _MMIO(0x9888), 0x058d2000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x4d800444 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f804000 },
+	{ _MMIO(0x9888), 0x43801080 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800084 },
+	{ _MMIO(0x9888), 0x53800044 },
+	{ _MMIO(0x9888), 0x47801080 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800000 },
+	{ _MMIO(0x9888), 0x41800840 },
+};
+
+static int
+get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
+					    const struct i915_oa_reg **regs,
+					    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_rasterizer_and_pixel_backend;
+	lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_sampler_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x70800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x0000c000 },
+	{ _MMIO(0x2774), 0x0000e7ff },
+	{ _MMIO(0x2778), 0x00003000 },
+	{ _MMIO(0x277c), 0x0000f9ff },
+	{ _MMIO(0x2780), 0x00000c00 },
+	{ _MMIO(0x2784), 0x0000fe7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_sampler_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_sampler_1[] = {
+	{ _MMIO(0x9888), 0x18921400 },
+	{ _MMIO(0x9888), 0x149500ab },
+	{ _MMIO(0x9888), 0x18b21400 },
+	{ _MMIO(0x9888), 0x14b500ab },
+	{ _MMIO(0x9888), 0x18d21400 },
+	{ _MMIO(0x9888), 0x14d500ab },
+	{ _MMIO(0x9888), 0x0cdc8000 },
+	{ _MMIO(0x9888), 0x0edc4000 },
+	{ _MMIO(0x9888), 0x02dcc000 },
+	{ _MMIO(0x9888), 0x04dcc000 },
+	{ _MMIO(0x9888), 0x1abd00a0 },
+	{ _MMIO(0x9888), 0x0abd8000 },
+	{ _MMIO(0x9888), 0x0cd88000 },
+	{ _MMIO(0x9888), 0x0ed84000 },
+	{ _MMIO(0x9888), 0x04d88000 },
+	{ _MMIO(0x9888), 0x1adb0050 },
+	{ _MMIO(0x9888), 0x04db8000 },
+	{ _MMIO(0x9888), 0x06db8000 },
+	{ _MMIO(0x9888), 0x08db8000 },
+	{ _MMIO(0x9888), 0x0adb4000 },
+	{ _MMIO(0x9888), 0x109f02a0 },
+	{ _MMIO(0x9888), 0x0c9fa000 },
+	{ _MMIO(0x9888), 0x0e9f00aa },
+	{ _MMIO(0x9888), 0x18b82500 },
+	{ _MMIO(0x9888), 0x02b88000 },
+	{ _MMIO(0x9888), 0x04b84000 },
+	{ _MMIO(0x9888), 0x06b84000 },
+	{ _MMIO(0x9888), 0x08b84000 },
+	{ _MMIO(0x9888), 0x0ab84000 },
+	{ _MMIO(0x9888), 0x0cb88000 },
+	{ _MMIO(0x9888), 0x0cb98000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x00b98000 },
+	{ _MMIO(0x9888), 0x02b9a000 },
+	{ _MMIO(0x9888), 0x04b9a000 },
+	{ _MMIO(0x9888), 0x06b92000 },
+	{ _MMIO(0x9888), 0x1aba0200 },
+	{ _MMIO(0x9888), 0x02ba8000 },
+	{ _MMIO(0x9888), 0x0cba8000 },
+	{ _MMIO(0x9888), 0x04908000 },
+	{ _MMIO(0x9888), 0x04918000 },
+	{ _MMIO(0x9888), 0x04927300 },
+	{ _MMIO(0x9888), 0x10920000 },
+	{ _MMIO(0x9888), 0x1893000a },
+	{ _MMIO(0x9888), 0x0a934000 },
+	{ _MMIO(0x9888), 0x0a946000 },
+	{ _MMIO(0x9888), 0x0c959000 },
+	{ _MMIO(0x9888), 0x0e950098 },
+	{ _MMIO(0x9888), 0x10950000 },
+	{ _MMIO(0x9888), 0x04b04000 },
+	{ _MMIO(0x9888), 0x04b14000 },
+	{ _MMIO(0x9888), 0x04b20073 },
+	{ _MMIO(0x9888), 0x10b20000 },
+	{ _MMIO(0x9888), 0x04b38000 },
+	{ _MMIO(0x9888), 0x06b38000 },
+	{ _MMIO(0x9888), 0x08b34000 },
+	{ _MMIO(0x9888), 0x04b4c000 },
+	{ _MMIO(0x9888), 0x02b59890 },
+	{ _MMIO(0x9888), 0x10b50000 },
+	{ _MMIO(0x9888), 0x06d04000 },
+	{ _MMIO(0x9888), 0x06d14000 },
+	{ _MMIO(0x9888), 0x06d20073 },
+	{ _MMIO(0x9888), 0x10d20000 },
+	{ _MMIO(0x9888), 0x18d30020 },
+	{ _MMIO(0x9888), 0x02d38000 },
+	{ _MMIO(0x9888), 0x0cd34000 },
+	{ _MMIO(0x9888), 0x0ad48000 },
+	{ _MMIO(0x9888), 0x04d42000 },
+	{ _MMIO(0x9888), 0x0ed59000 },
+	{ _MMIO(0x9888), 0x00d59800 },
+	{ _MMIO(0x9888), 0x10d50000 },
+	{ _MMIO(0x9888), 0x0f88000e },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b5500 },
+	{ _MMIO(0x9888), 0x258b000a },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x0d8d8000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x018d8000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x2185000a },
+	{ _MMIO(0x9888), 0x1b830150 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d848000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d808000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47801021 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800c64 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800c02 },
+};
+
+static int
+get_sampler_1_mux_config(struct drm_i915_private *dev_priv,
+			 const struct i915_oa_reg **regs,
+			 int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_sampler_1;
+	lens[n] = ARRAY_SIZE(mux_config_sampler_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_sampler_2[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x70800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x0000c000 },
+	{ _MMIO(0x2774), 0x0000e7ff },
+	{ _MMIO(0x2778), 0x00003000 },
+	{ _MMIO(0x277c), 0x0000f9ff },
+	{ _MMIO(0x2780), 0x00000c00 },
+	{ _MMIO(0x2784), 0x0000fe7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_sampler_2[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_sampler_2[] = {
+	{ _MMIO(0x9888), 0x18121400 },
+	{ _MMIO(0x9888), 0x141500ab },
+	{ _MMIO(0x9888), 0x18321400 },
+	{ _MMIO(0x9888), 0x143500ab },
+	{ _MMIO(0x9888), 0x18521400 },
+	{ _MMIO(0x9888), 0x145500ab },
+	{ _MMIO(0x9888), 0x0c5c8000 },
+	{ _MMIO(0x9888), 0x0e5c4000 },
+	{ _MMIO(0x9888), 0x025cc000 },
+	{ _MMIO(0x9888), 0x045cc000 },
+	{ _MMIO(0x9888), 0x1a3d00a0 },
+	{ _MMIO(0x9888), 0x0a3d8000 },
+	{ _MMIO(0x9888), 0x0c588000 },
+	{ _MMIO(0x9888), 0x0e584000 },
+	{ _MMIO(0x9888), 0x04588000 },
+	{ _MMIO(0x9888), 0x1a5b0050 },
+	{ _MMIO(0x9888), 0x045b8000 },
+	{ _MMIO(0x9888), 0x065b8000 },
+	{ _MMIO(0x9888), 0x085b8000 },
+	{ _MMIO(0x9888), 0x0a5b4000 },
+	{ _MMIO(0x9888), 0x101f02a0 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f00aa },
+	{ _MMIO(0x9888), 0x18382500 },
+	{ _MMIO(0x9888), 0x02388000 },
+	{ _MMIO(0x9888), 0x04384000 },
+	{ _MMIO(0x9888), 0x06384000 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0c388000 },
+	{ _MMIO(0x9888), 0x0c398000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x00398000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x1a3a0200 },
+	{ _MMIO(0x9888), 0x023a8000 },
+	{ _MMIO(0x9888), 0x0c3a8000 },
+	{ _MMIO(0x9888), 0x04108000 },
+	{ _MMIO(0x9888), 0x04118000 },
+	{ _MMIO(0x9888), 0x04127300 },
+	{ _MMIO(0x9888), 0x10120000 },
+	{ _MMIO(0x9888), 0x1813000a },
+	{ _MMIO(0x9888), 0x0a134000 },
+	{ _MMIO(0x9888), 0x0a146000 },
+	{ _MMIO(0x9888), 0x0c159000 },
+	{ _MMIO(0x9888), 0x0e150098 },
+	{ _MMIO(0x9888), 0x10150000 },
+	{ _MMIO(0x9888), 0x04304000 },
+	{ _MMIO(0x9888), 0x04314000 },
+	{ _MMIO(0x9888), 0x04320073 },
+	{ _MMIO(0x9888), 0x10320000 },
+	{ _MMIO(0x9888), 0x04338000 },
+	{ _MMIO(0x9888), 0x06338000 },
+	{ _MMIO(0x9888), 0x08334000 },
+	{ _MMIO(0x9888), 0x0434c000 },
+	{ _MMIO(0x9888), 0x02359890 },
+	{ _MMIO(0x9888), 0x10350000 },
+	{ _MMIO(0x9888), 0x06504000 },
+	{ _MMIO(0x9888), 0x06514000 },
+	{ _MMIO(0x9888), 0x06520073 },
+	{ _MMIO(0x9888), 0x10520000 },
+	{ _MMIO(0x9888), 0x18530020 },
+	{ _MMIO(0x9888), 0x02538000 },
+	{ _MMIO(0x9888), 0x0c534000 },
+	{ _MMIO(0x9888), 0x0a548000 },
+	{ _MMIO(0x9888), 0x04542000 },
+	{ _MMIO(0x9888), 0x0e559000 },
+	{ _MMIO(0x9888), 0x00559800 },
+	{ _MMIO(0x9888), 0x10550000 },
+	{ _MMIO(0x9888), 0x1b8aa000 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x258b0005 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x2185000a },
+	{ _MMIO(0x9888), 0x1b830150 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d848000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d808000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47801021 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800c64 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800c02 },
+};
+
+static int
+get_sampler_2_mux_config(struct drm_i915_private *dev_priv,
+			 const struct i915_oa_reg **regs,
+			 int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_sampler_2;
+	lens[n] = ARRAY_SIZE(mux_config_sampler_2);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_tdl_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000fdff },
+	{ _MMIO(0x2778), 0x00000000 },
+	{ _MMIO(0x277c), 0x0000fe7f },
+	{ _MMIO(0x2780), 0x00000002 },
+	{ _MMIO(0x2784), 0x0000ffbf },
+	{ _MMIO(0x2788), 0x00000000 },
+	{ _MMIO(0x278c), 0x0000ffcf },
+	{ _MMIO(0x2790), 0x00000002 },
+	{ _MMIO(0x2794), 0x0000fff7 },
+	{ _MMIO(0x2798), 0x00000000 },
+	{ _MMIO(0x279c), 0x0000fff9 },
+};
+
+static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_tdl_1[] = {
+	{ _MMIO(0x9888), 0x16154d60 },
+	{ _MMIO(0x9888), 0x16352e60 },
+	{ _MMIO(0x9888), 0x16554d60 },
+	{ _MMIO(0x9888), 0x16950000 },
+	{ _MMIO(0x9888), 0x16b50000 },
+	{ _MMIO(0x9888), 0x16d50000 },
+	{ _MMIO(0x9888), 0x005c8000 },
+	{ _MMIO(0x9888), 0x045cc000 },
+	{ _MMIO(0x9888), 0x065c4000 },
+	{ _MMIO(0x9888), 0x083d8000 },
+	{ _MMIO(0x9888), 0x0a3d8000 },
+	{ _MMIO(0x9888), 0x0458c000 },
+	{ _MMIO(0x9888), 0x025b8000 },
+	{ _MMIO(0x9888), 0x085b4000 },
+	{ _MMIO(0x9888), 0x0a5b4000 },
+	{ _MMIO(0x9888), 0x0c5b8000 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f00aa },
+	{ _MMIO(0x9888), 0x02384000 },
+	{ _MMIO(0x9888), 0x04388000 },
+	{ _MMIO(0x9888), 0x06388000 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0c384000 },
+	{ _MMIO(0x9888), 0x00398000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x043a8000 },
+	{ _MMIO(0x9888), 0x063a8000 },
+	{ _MMIO(0x9888), 0x08138000 },
+	{ _MMIO(0x9888), 0x0a138000 },
+	{ _MMIO(0x9888), 0x06143000 },
+	{ _MMIO(0x9888), 0x0415cfc7 },
+	{ _MMIO(0x9888), 0x10150000 },
+	{ _MMIO(0x9888), 0x02338000 },
+	{ _MMIO(0x9888), 0x0c338000 },
+	{ _MMIO(0x9888), 0x04342000 },
+	{ _MMIO(0x9888), 0x06344000 },
+	{ _MMIO(0x9888), 0x0035c700 },
+	{ _MMIO(0x9888), 0x063500cf },
+	{ _MMIO(0x9888), 0x10350000 },
+	{ _MMIO(0x9888), 0x04538000 },
+	{ _MMIO(0x9888), 0x06538000 },
+	{ _MMIO(0x9888), 0x0454c000 },
+	{ _MMIO(0x9888), 0x0255cfc7 },
+	{ _MMIO(0x9888), 0x10550000 },
+	{ _MMIO(0x9888), 0x06dc8000 },
+	{ _MMIO(0x9888), 0x08dc4000 },
+	{ _MMIO(0x9888), 0x0cdcc000 },
+	{ _MMIO(0x9888), 0x0edcc000 },
+	{ _MMIO(0x9888), 0x1abd00a8 },
+	{ _MMIO(0x9888), 0x0cd8c000 },
+	{ _MMIO(0x9888), 0x0ed84000 },
+	{ _MMIO(0x9888), 0x0edb8000 },
+	{ _MMIO(0x9888), 0x18db0800 },
+	{ _MMIO(0x9888), 0x1adb0254 },
+	{ _MMIO(0x9888), 0x0e9faa00 },
+	{ _MMIO(0x9888), 0x109f02aa },
+	{ _MMIO(0x9888), 0x0eb84000 },
+	{ _MMIO(0x9888), 0x16b84000 },
+	{ _MMIO(0x9888), 0x18b8156a },
+	{ _MMIO(0x9888), 0x06b98000 },
+	{ _MMIO(0x9888), 0x08b9a000 },
+	{ _MMIO(0x9888), 0x0ab9a000 },
+	{ _MMIO(0x9888), 0x0cb9a000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x18baa000 },
+	{ _MMIO(0x9888), 0x1aba0002 },
+	{ _MMIO(0x9888), 0x16934000 },
+	{ _MMIO(0x9888), 0x1893000a },
+	{ _MMIO(0x9888), 0x0a947000 },
+	{ _MMIO(0x9888), 0x0c95c5c1 },
+	{ _MMIO(0x9888), 0x0e9500c3 },
+	{ _MMIO(0x9888), 0x10950000 },
+	{ _MMIO(0x9888), 0x0eb38000 },
+	{ _MMIO(0x9888), 0x16b30040 },
+	{ _MMIO(0x9888), 0x18b30020 },
+	{ _MMIO(0x9888), 0x06b48000 },
+	{ _MMIO(0x9888), 0x08b41000 },
+	{ _MMIO(0x9888), 0x0ab48000 },
+	{ _MMIO(0x9888), 0x06b5c500 },
+	{ _MMIO(0x9888), 0x08b500c3 },
+	{ _MMIO(0x9888), 0x0eb5c100 },
+	{ _MMIO(0x9888), 0x10b50000 },
+	{ _MMIO(0x9888), 0x16d31500 },
+	{ _MMIO(0x9888), 0x08d4e000 },
+	{ _MMIO(0x9888), 0x08d5c100 },
+	{ _MMIO(0x9888), 0x0ad5c3c5 },
+	{ _MMIO(0x9888), 0x10d50000 },
+	{ _MMIO(0x9888), 0x0d88f800 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x258baaa5 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x0f8c4000 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c5500 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x078d8000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800c42 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800063 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800800 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f8014a4 },
+	{ _MMIO(0x9888), 0x41801042 },
+};
+
+static int
+get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
+		     const struct i915_oa_reg **regs,
+		     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_tdl_1;
+	lens[n] = ARRAY_SIZE(mux_config_tdl_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_tdl_2[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000fdff },
+	{ _MMIO(0x2778), 0x00000000 },
+	{ _MMIO(0x277c), 0x0000fe7f },
+	{ _MMIO(0x2780), 0x00000000 },
+	{ _MMIO(0x2784), 0x0000ff9f },
+	{ _MMIO(0x2788), 0x00000000 },
+	{ _MMIO(0x278c), 0x0000ffe7 },
+	{ _MMIO(0x2790), 0x00000002 },
+	{ _MMIO(0x2794), 0x0000fffb },
+	{ _MMIO(0x2798), 0x00000002 },
+	{ _MMIO(0x279c), 0x0000fffd },
+};
+
+static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_tdl_2[] = {
+	{ _MMIO(0x9888), 0x16150000 },
+	{ _MMIO(0x9888), 0x16350000 },
+	{ _MMIO(0x9888), 0x16550000 },
+	{ _MMIO(0x9888), 0x16952e60 },
+	{ _MMIO(0x9888), 0x16b54d60 },
+	{ _MMIO(0x9888), 0x16d52e60 },
+	{ _MMIO(0x9888), 0x065c8000 },
+	{ _MMIO(0x9888), 0x085cc000 },
+	{ _MMIO(0x9888), 0x0a5cc000 },
+	{ _MMIO(0x9888), 0x0c5c4000 },
+	{ _MMIO(0x9888), 0x0e3d8000 },
+	{ _MMIO(0x9888), 0x183da000 },
+	{ _MMIO(0x9888), 0x06588000 },
+	{ _MMIO(0x9888), 0x08588000 },
+	{ _MMIO(0x9888), 0x0a584000 },
+	{ _MMIO(0x9888), 0x0e5b4000 },
+	{ _MMIO(0x9888), 0x185b5800 },
+	{ _MMIO(0x9888), 0x1a5b000a },
+	{ _MMIO(0x9888), 0x0e1faa00 },
+	{ _MMIO(0x9888), 0x101f02aa },
+	{ _MMIO(0x9888), 0x0e384000 },
+	{ _MMIO(0x9888), 0x16384000 },
+	{ _MMIO(0x9888), 0x18382a55 },
+	{ _MMIO(0x9888), 0x06398000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0c39a000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x1a3a02a0 },
+	{ _MMIO(0x9888), 0x0e138000 },
+	{ _MMIO(0x9888), 0x16130500 },
+	{ _MMIO(0x9888), 0x06148000 },
+	{ _MMIO(0x9888), 0x08146000 },
+	{ _MMIO(0x9888), 0x0615c100 },
+	{ _MMIO(0x9888), 0x0815c500 },
+	{ _MMIO(0x9888), 0x0a1500c3 },
+	{ _MMIO(0x9888), 0x10150000 },
+	{ _MMIO(0x9888), 0x16335040 },
+	{ _MMIO(0x9888), 0x08349000 },
+	{ _MMIO(0x9888), 0x0a341000 },
+	{ _MMIO(0x9888), 0x083500c1 },
+	{ _MMIO(0x9888), 0x0a35c500 },
+	{ _MMIO(0x9888), 0x0c3500c3 },
+	{ _MMIO(0x9888), 0x10350000 },
+	{ _MMIO(0x9888), 0x1853002a },
+	{ _MMIO(0x9888), 0x0a54e000 },
+	{ _MMIO(0x9888), 0x0c55c500 },
+	{ _MMIO(0x9888), 0x0e55c1c3 },
+	{ _MMIO(0x9888), 0x10550000 },
+	{ _MMIO(0x9888), 0x00dc8000 },
+	{ _MMIO(0x9888), 0x02dcc000 },
+	{ _MMIO(0x9888), 0x04dc4000 },
+	{ _MMIO(0x9888), 0x04bd8000 },
+	{ _MMIO(0x9888), 0x06bd8000 },
+	{ _MMIO(0x9888), 0x02d8c000 },
+	{ _MMIO(0x9888), 0x02db8000 },
+	{ _MMIO(0x9888), 0x04db4000 },
+	{ _MMIO(0x9888), 0x06db4000 },
+	{ _MMIO(0x9888), 0x08db8000 },
+	{ _MMIO(0x9888), 0x0c9fa000 },
+	{ _MMIO(0x9888), 0x0e9f00aa },
+	{ _MMIO(0x9888), 0x02b84000 },
+	{ _MMIO(0x9888), 0x04b84000 },
+	{ _MMIO(0x9888), 0x06b84000 },
+	{ _MMIO(0x9888), 0x08b84000 },
+	{ _MMIO(0x9888), 0x0ab88000 },
+	{ _MMIO(0x9888), 0x0cb88000 },
+	{ _MMIO(0x9888), 0x00b98000 },
+	{ _MMIO(0x9888), 0x02b9a000 },
+	{ _MMIO(0x9888), 0x04b9a000 },
+	{ _MMIO(0x9888), 0x06b92000 },
+	{ _MMIO(0x9888), 0x0aba8000 },
+	{ _MMIO(0x9888), 0x0cba8000 },
+	{ _MMIO(0x9888), 0x04938000 },
+	{ _MMIO(0x9888), 0x06938000 },
+	{ _MMIO(0x9888), 0x0494c000 },
+	{ _MMIO(0x9888), 0x0295cfc7 },
+	{ _MMIO(0x9888), 0x10950000 },
+	{ _MMIO(0x9888), 0x02b38000 },
+	{ _MMIO(0x9888), 0x08b38000 },
+	{ _MMIO(0x9888), 0x04b42000 },
+	{ _MMIO(0x9888), 0x06b41000 },
+	{ _MMIO(0x9888), 0x00b5c700 },
+	{ _MMIO(0x9888), 0x04b500cf },
+	{ _MMIO(0x9888), 0x10b50000 },
+	{ _MMIO(0x9888), 0x0ad38000 },
+	{ _MMIO(0x9888), 0x0cd38000 },
+	{ _MMIO(0x9888), 0x06d46000 },
+	{ _MMIO(0x9888), 0x04d5c700 },
+	{ _MMIO(0x9888), 0x06d500cf },
+	{ _MMIO(0x9888), 0x10d50000 },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8aaaa0 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x258b555a },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x238b5500 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x018d8000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800882 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45801082 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x478014a5 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800002 },
+	{ _MMIO(0x9888), 0x41800c62 },
+};
+
+static int
+get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
+		     const struct i915_oa_reg **regs,
+		     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_tdl_2;
+	lens[n] = ARRAY_SIZE(mux_config_tdl_2);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_extra[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
+	{ _MMIO(0xe458), 0x00001000 },
+	{ _MMIO(0xe558), 0x00003002 },
+	{ _MMIO(0xe658), 0x00005004 },
+	{ _MMIO(0xe758), 0x00011010 },
+	{ _MMIO(0xe45c), 0x00050012 },
+	{ _MMIO(0xe55c), 0x00052051 },
+	{ _MMIO(0xe65c), 0x00000008 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extra[] = {
+	{ _MMIO(0x9888), 0x161503e0 },
+	{ _MMIO(0x9888), 0x163503e0 },
+	{ _MMIO(0x9888), 0x165503e0 },
+	{ _MMIO(0x9888), 0x169503e0 },
+	{ _MMIO(0x9888), 0x16b503e0 },
+	{ _MMIO(0x9888), 0x16d503e0 },
+	{ _MMIO(0x9888), 0x045cc000 },
+	{ _MMIO(0x9888), 0x083d8000 },
+	{ _MMIO(0x9888), 0x04584000 },
+	{ _MMIO(0x9888), 0x085b4000 },
+	{ _MMIO(0x9888), 0x0a5b8000 },
+	{ _MMIO(0x9888), 0x0e1f00a8 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0c388000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x0c3a8000 },
+	{ _MMIO(0x9888), 0x08138000 },
+	{ _MMIO(0x9888), 0x06141000 },
+	{ _MMIO(0x9888), 0x041500c3 },
+	{ _MMIO(0x9888), 0x10150000 },
+	{ _MMIO(0x9888), 0x0a338000 },
+	{ _MMIO(0x9888), 0x06342000 },
+	{ _MMIO(0x9888), 0x0435c300 },
+	{ _MMIO(0x9888), 0x10350000 },
+	{ _MMIO(0x9888), 0x0c538000 },
+	{ _MMIO(0x9888), 0x06544000 },
+	{ _MMIO(0x9888), 0x065500c3 },
+	{ _MMIO(0x9888), 0x10550000 },
+	{ _MMIO(0x9888), 0x00dc8000 },
+	{ _MMIO(0x9888), 0x02dc4000 },
+	{ _MMIO(0x9888), 0x02bd8000 },
+	{ _MMIO(0x9888), 0x00d88000 },
+	{ _MMIO(0x9888), 0x02db4000 },
+	{ _MMIO(0x9888), 0x04db8000 },
+	{ _MMIO(0x9888), 0x0c9fa000 },
+	{ _MMIO(0x9888), 0x0e9f0002 },
+	{ _MMIO(0x9888), 0x02b84000 },
+	{ _MMIO(0x9888), 0x04b84000 },
+	{ _MMIO(0x9888), 0x06b88000 },
+	{ _MMIO(0x9888), 0x00b98000 },
+	{ _MMIO(0x9888), 0x02b9a000 },
+	{ _MMIO(0x9888), 0x06ba8000 },
+	{ _MMIO(0x9888), 0x02938000 },
+	{ _MMIO(0x9888), 0x04942000 },
+	{ _MMIO(0x9888), 0x0095c300 },
+	{ _MMIO(0x9888), 0x10950000 },
+	{ _MMIO(0x9888), 0x04b38000 },
+	{ _MMIO(0x9888), 0x04b44000 },
+	{ _MMIO(0x9888), 0x02b500c3 },
+	{ _MMIO(0x9888), 0x10b50000 },
+	{ _MMIO(0x9888), 0x06d38000 },
+	{ _MMIO(0x9888), 0x04d48000 },
+	{ _MMIO(0x9888), 0x02d5c300 },
+	{ _MMIO(0x9888), 0x10d50000 },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x238b3500 },
+	{ _MMIO(0x9888), 0x258b0005 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x018d8000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x2185000a },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800c40 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41801482 },
+	{ _MMIO(0x9888), 0x31800000 },
+};
+
+static int
+get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_extra;
+	lens[n] = ARRAY_SIZE(mux_config_compute_extra);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00100030 },
+	{ _MMIO(0x2774), 0x0000fff9 },
+	{ _MMIO(0x2778), 0x00000002 },
+	{ _MMIO(0x277c), 0x0000fffc },
+	{ _MMIO(0x2780), 0x00000002 },
+	{ _MMIO(0x2784), 0x0000fff3 },
+	{ _MMIO(0x2788), 0x00100180 },
+	{ _MMIO(0x278c), 0x0000ffcf },
+	{ _MMIO(0x2790), 0x00000002 },
+	{ _MMIO(0x2794), 0x0000ffcf },
+	{ _MMIO(0x2798), 0x00000002 },
+	{ _MMIO(0x279c), 0x0000ff3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00008003 },
+};
+
+static const struct i915_oa_reg mux_config_vme_pipe[] = {
+	{ _MMIO(0x9888), 0x14100812 },
+	{ _MMIO(0x9888), 0x14125800 },
+	{ _MMIO(0x9888), 0x161200c0 },
+	{ _MMIO(0x9888), 0x14300812 },
+	{ _MMIO(0x9888), 0x14325800 },
+	{ _MMIO(0x9888), 0x163200c0 },
+	{ _MMIO(0x9888), 0x005c4000 },
+	{ _MMIO(0x9888), 0x065c8000 },
+	{ _MMIO(0x9888), 0x085cc000 },
+	{ _MMIO(0x9888), 0x0a5cc000 },
+	{ _MMIO(0x9888), 0x0c5cc000 },
+	{ _MMIO(0x9888), 0x003d8000 },
+	{ _MMIO(0x9888), 0x0e3d8000 },
+	{ _MMIO(0x9888), 0x183d2800 },
+	{ _MMIO(0x9888), 0x00584000 },
+	{ _MMIO(0x9888), 0x06588000 },
+	{ _MMIO(0x9888), 0x0858c000 },
+	{ _MMIO(0x9888), 0x005b4000 },
+	{ _MMIO(0x9888), 0x0e5b4000 },
+	{ _MMIO(0x9888), 0x185b9400 },
+	{ _MMIO(0x9888), 0x1a5b002a },
+	{ _MMIO(0x9888), 0x0c1f0800 },
+	{ _MMIO(0x9888), 0x0e1faa00 },
+	{ _MMIO(0x9888), 0x101f002a },
+	{ _MMIO(0x9888), 0x00384000 },
+	{ _MMIO(0x9888), 0x0e384000 },
+	{ _MMIO(0x9888), 0x16384000 },
+	{ _MMIO(0x9888), 0x18380155 },
+	{ _MMIO(0x9888), 0x00392000 },
+	{ _MMIO(0x9888), 0x06398000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0c39a000 },
+	{ _MMIO(0x9888), 0x00100047 },
+	{ _MMIO(0x9888), 0x06101a80 },
+	{ _MMIO(0x9888), 0x10100000 },
+	{ _MMIO(0x9888), 0x0810c000 },
+	{ _MMIO(0x9888), 0x0811c000 },
+	{ _MMIO(0x9888), 0x08126151 },
+	{ _MMIO(0x9888), 0x10120000 },
+	{ _MMIO(0x9888), 0x00134000 },
+	{ _MMIO(0x9888), 0x0e134000 },
+	{ _MMIO(0x9888), 0x161300a0 },
+	{ _MMIO(0x9888), 0x0a301ac7 },
+	{ _MMIO(0x9888), 0x10300000 },
+	{ _MMIO(0x9888), 0x0c30c000 },
+	{ _MMIO(0x9888), 0x0c31c000 },
+	{ _MMIO(0x9888), 0x0c326151 },
+	{ _MMIO(0x9888), 0x10320000 },
+	{ _MMIO(0x9888), 0x16332a00 },
+	{ _MMIO(0x9888), 0x18330001 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8a2aa0 },
+	{ _MMIO(0x9888), 0x238b0020 },
+	{ _MMIO(0x9888), 0x258b5550 },
+	{ _MMIO(0x9888), 0x278b0001 },
+	{ _MMIO(0x9888), 0x1f850080 },
+	{ _MMIO(0x9888), 0x2185aaa0 },
+	{ _MMIO(0x9888), 0x23850002 },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830015 },
+	{ _MMIO(0x9888), 0x01844000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x11804000 },
+	{ _MMIO(0x9888), 0x17808000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3d800800 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800002 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800884 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800002 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+};
+
+static int
+get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
+			const struct i915_oa_reg **regs,
+			int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_vme_pipe;
+	lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2770), 0x00000004 },
+	{ _MMIO(0x2774), 0x00000000 },
+	{ _MMIO(0x2778), 0x00000003 },
+	{ _MMIO(0x277c), 0x00000000 },
+	{ _MMIO(0x2780), 0x00000007 },
+	{ _MMIO(0x2784), 0x00000000 },
+	{ _MMIO(0x2788), 0x00100002 },
+	{ _MMIO(0x278c), 0x0000fff7 },
+	{ _MMIO(0x2790), 0x00100002 },
+	{ _MMIO(0x2794), 0x0000ffcf },
+	{ _MMIO(0x2798), 0x00100082 },
+	{ _MMIO(0x279c), 0x0000ffef },
+	{ _MMIO(0x27a0), 0x001000c2 },
+	{ _MMIO(0x27a4), 0x0000ffe7 },
+	{ _MMIO(0x27a8), 0x00100001 },
+	{ _MMIO(0x27ac), 0x0000ffe7 },
+};
+
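+/* TEST_OA does no flexible-EU programming, so its flex table is
+ * intentionally empty (ARRAY_SIZE() == 0, nothing gets written).
+ */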
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+	{ _MMIO(0x9888), 0x198b0000 },
+	{ _MMIO(0x9888), 0x078b0066 },
+	{ _MMIO(0x9888), 0x118b0000 },
+	{ _MMIO(0x9888), 0x258b0000 },
+	{ _MMIO(0x9888), 0x21850008 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+};
+
+static int
+get_test_oa_mux_config(struct drm_i915_private *dev_priv,
+		       const struct i915_oa_reg **regs,
+		       int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_test_oa;
+	lens[n] = ARRAY_SIZE(mux_config_test_oa);
+	n++;
+
+	return n;
+}
+
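+/* Look up the register configs for the metric set requested through
+ * dev_priv->perf.oa.metrics_set and point the perf state at them.
+ * Returns 0 on success, -EINVAL if an advertised set has no usable MUX
+ * config, or -ENODEV for an unknown set.
+ */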
+int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv)
+{
+	dev_priv->perf.oa.n_mux_configs = 0;
+	dev_priv->perf.oa.b_counter_regs = NULL;
+	dev_priv->perf.oa.b_counter_regs_len = 0;
+	dev_priv->perf.oa.flex_regs = NULL;
+	dev_priv->perf.oa.flex_regs_len = 0;
+
+	switch (dev_priv->perf.oa.metrics_set) {
+	case METRIC_SET_ID_RENDER_BASIC:
+		dev_priv->perf.oa.n_mux_configs =
+			get_render_basic_mux_config(dev_priv,
+						    dev_priv->perf.oa.mux_regs,
+						    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_render_basic;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_render_basic);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_render_basic;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_render_basic);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_BASIC:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_basic_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_basic;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_basic);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_basic;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_basic);
+
+		return 0;
+	case METRIC_SET_ID_RENDER_PIPE_PROFILE:
+		dev_priv->perf.oa.n_mux_configs =
+			get_render_pipe_profile_mux_config(dev_priv,
+							   dev_priv->perf.oa.mux_regs,
+							   dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_render_pipe_profile;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_render_pipe_profile);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_render_pipe_profile;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_render_pipe_profile);
+
+		return 0;
+	case METRIC_SET_ID_MEMORY_READS:
+		dev_priv->perf.oa.n_mux_configs =
+			get_memory_reads_mux_config(dev_priv,
+						    dev_priv->perf.oa.mux_regs,
+						    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_memory_reads;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_memory_reads);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_memory_reads;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_memory_reads);
+
+		return 0;
+	case METRIC_SET_ID_MEMORY_WRITES:
+		dev_priv->perf.oa.n_mux_configs =
+			get_memory_writes_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_memory_writes;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_memory_writes);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_memory_writes;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_memory_writes);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_EXTENDED:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_extended_mux_config(dev_priv,
+							dev_priv->perf.oa.mux_regs,
+							dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_extended;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_extended);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_extended;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_extended);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_L3_CACHE:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_l3_cache_mux_config(dev_priv,
+							dev_priv->perf.oa.mux_regs,
+							dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_l3_cache;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_l3_cache);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_l3_cache;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_l3_cache);
+
+		return 0;
+	case METRIC_SET_ID_DATA_PORT_READS_COALESCING:
+		dev_priv->perf.oa.n_mux_configs =
+			get_data_port_reads_coalescing_mux_config(dev_priv,
+								  dev_priv->perf.oa.mux_regs,
+								  dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"DATA_PORT_READS_COALESCING\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_data_port_reads_coalescing;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_data_port_reads_coalescing);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_data_port_reads_coalescing;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_data_port_reads_coalescing);
+
+		return 0;
+	case METRIC_SET_ID_DATA_PORT_WRITES_COALESCING:
+		dev_priv->perf.oa.n_mux_configs =
+			get_data_port_writes_coalescing_mux_config(dev_priv,
+								   dev_priv->perf.oa.mux_regs,
+								   dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"DATA_PORT_WRITES_COALESCING\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_data_port_writes_coalescing;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_data_port_writes_coalescing);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_data_port_writes_coalescing;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_data_port_writes_coalescing);
+
+		return 0;
+	case METRIC_SET_ID_HDC_AND_SF:
+		dev_priv->perf.oa.n_mux_configs =
+			get_hdc_and_sf_mux_config(dev_priv,
+						  dev_priv->perf.oa.mux_regs,
+						  dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_hdc_and_sf;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_hdc_and_sf);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_hdc_and_sf;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_hdc_and_sf);
+
+		return 0;
+	case METRIC_SET_ID_L3_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_1_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_1);
+
+		return 0;
+	case METRIC_SET_ID_L3_2:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_2_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_2;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_2);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_2;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_2);
+
+		return 0;
+	case METRIC_SET_ID_L3_3:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_3_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_3;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_3);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_3;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_3);
+
+		return 0;
+	case METRIC_SET_ID_L3_4:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_4_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_4\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_4;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_4);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_4;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_4);
+
+		return 0;
+	case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
+		dev_priv->perf.oa.n_mux_configs =
+			get_rasterizer_and_pixel_backend_mux_config(dev_priv,
+								    dev_priv->perf.oa.mux_regs,
+								    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_rasterizer_and_pixel_backend;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_rasterizer_and_pixel_backend;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
+
+		return 0;
+	case METRIC_SET_ID_SAMPLER_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_sampler_1_mux_config(dev_priv,
+						 dev_priv->perf.oa.mux_regs,
+						 dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_1\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_sampler_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_sampler_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_sampler_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_sampler_1);
+
+		return 0;
+	case METRIC_SET_ID_SAMPLER_2:
+		dev_priv->perf.oa.n_mux_configs =
+			get_sampler_2_mux_config(dev_priv,
+						 dev_priv->perf.oa.mux_regs,
+						 dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_2\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_sampler_2;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_sampler_2);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_sampler_2;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_sampler_2);
+
+		return 0;
+	case METRIC_SET_ID_TDL_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_tdl_1_mux_config(dev_priv,
+					     dev_priv->perf.oa.mux_regs,
+					     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_tdl_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_tdl_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_tdl_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_tdl_1);
+
+		return 0;
+	case METRIC_SET_ID_TDL_2:
+		dev_priv->perf.oa.n_mux_configs =
+			get_tdl_2_mux_config(dev_priv,
+					     dev_priv->perf.oa.mux_regs,
+					     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_tdl_2;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_tdl_2);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_tdl_2;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_tdl_2);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_EXTRA:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_extra_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_extra;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_extra);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_extra;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_extra);
+
+		return 0;
+	case METRIC_SET_ID_VME_PIPE:
+		dev_priv->perf.oa.n_mux_configs =
+			get_vme_pipe_mux_config(dev_priv,
+						dev_priv->perf.oa.mux_regs,
+						dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_vme_pipe;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_vme_pipe);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_vme_pipe;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_vme_pipe);
+
+		return 0;
+	case METRIC_SET_ID_TEST_OA:
+		dev_priv->perf.oa.n_mux_configs =
+			get_test_oa_mux_config(dev_priv,
+					       dev_priv->perf.oa.mux_regs,
+					       dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config; it was never advertised to userspace, so it
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_test_oa;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_test_oa);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_test_oa;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_test_oa);
+
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
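+/* Each metric set is advertised as a sysfs directory named after its
+ * GUID, holding a read-only "id" file; the show_*_id() handlers below
+ * print the METRIC_SET_ID_* value userspace passes back when opening an
+ * i915-perf stream.
+ */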
+static ssize_t
+show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
+}
+
+static struct device_attribute dev_attr_render_basic_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_render_basic_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_render_basic[] = {
+	&dev_attr_render_basic_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_render_basic = {
+	.name = "b541bd57-0e0f-4154-b4c0-5858010a2bf7",
+	.attrs = attrs_render_basic,
+};
+
+static ssize_t
+show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
+}
+
+static struct device_attribute dev_attr_compute_basic_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_basic_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_basic[] = {
+	&dev_attr_compute_basic_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_basic = {
+	.name = "35fbc9b2-a891-40a6-a38d-022bb7057552",
+	.attrs = attrs_compute_basic,
+};
+
+static ssize_t
+show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
+}
+
+static struct device_attribute dev_attr_render_pipe_profile_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_render_pipe_profile_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_render_pipe_profile[] = {
+	&dev_attr_render_pipe_profile_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_render_pipe_profile = {
+	.name = "233d0544-fff7-4281-8291-e02f222aff72",
+	.attrs = attrs_render_pipe_profile,
+};
+
+static ssize_t
+show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
+}
+
+static struct device_attribute dev_attr_memory_reads_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_memory_reads_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_memory_reads[] = {
+	&dev_attr_memory_reads_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_memory_reads = {
+	.name = "2b255d48-2117-4fef-a8f7-f151e1d25a2c",
+	.attrs = attrs_memory_reads,
+};
+
+static ssize_t
+show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
+}
+
+static struct device_attribute dev_attr_memory_writes_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_memory_writes_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_memory_writes[] = {
+	&dev_attr_memory_writes_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_memory_writes = {
+	.name = "f7fd3220-b466-4a4d-9f98-b0caf3f2394c",
+	.attrs = attrs_memory_writes,
+};
+
+static ssize_t
+show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
+}
+
+static struct device_attribute dev_attr_compute_extended_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_extended_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_extended[] = {
+	&dev_attr_compute_extended_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_extended = {
+	.name = "e99ccaca-821c-4df9-97a7-96bdb7204e43",
+	.attrs = attrs_compute_extended,
+};
+
+static ssize_t
+show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
+}
+
+static struct device_attribute dev_attr_compute_l3_cache_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_l3_cache_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_l3_cache[] = {
+	&dev_attr_compute_l3_cache_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_l3_cache = {
+	.name = "27a364dc-8225-4ecb-b607-d6f1925598d9",
+	.attrs = attrs_compute_l3_cache,
+};
+
+static ssize_t
+show_data_port_reads_coalescing_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_DATA_PORT_READS_COALESCING);
+}
+
+static struct device_attribute dev_attr_data_port_reads_coalescing_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_data_port_reads_coalescing_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_data_port_reads_coalescing[] = {
+	&dev_attr_data_port_reads_coalescing_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_data_port_reads_coalescing = {
+	.name = "857fc630-2f09-4804-85f1-084adfadd5ab",
+	.attrs = attrs_data_port_reads_coalescing,
+};
+
+static ssize_t
+show_data_port_writes_coalescing_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_DATA_PORT_WRITES_COALESCING);
+}
+
+static struct device_attribute dev_attr_data_port_writes_coalescing_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_data_port_writes_coalescing_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_data_port_writes_coalescing[] = {
+	&dev_attr_data_port_writes_coalescing_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_data_port_writes_coalescing = {
+	.name = "343ebc99-4a55-414c-8c17-d8e259cf5e20",
+	.attrs = attrs_data_port_writes_coalescing,
+};
+
+static ssize_t
+show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
+}
+
+static struct device_attribute dev_attr_hdc_and_sf_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_hdc_and_sf_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_hdc_and_sf[] = {
+	&dev_attr_hdc_and_sf_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_hdc_and_sf = {
+	.name = "7bdafd88-a4fa-4ed5-bc09-1a977aa5be3e",
+	.attrs = attrs_hdc_and_sf,
+};
+
+static ssize_t
+show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
+}
+
+static struct device_attribute dev_attr_l3_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_1[] = {
+	&dev_attr_l3_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_1 = {
+	.name = "9385ebb2-f34f-4aa5-aec5-7e9cbbea0f0b",
+	.attrs = attrs_l3_1,
+};
+
+static ssize_t
+show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
+}
+
+static struct device_attribute dev_attr_l3_2_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_2_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_2[] = {
+	&dev_attr_l3_2_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_2 = {
+	.name = "446ae59b-ff2e-41c9-b49e-0184a54bf00a",
+	.attrs = attrs_l3_2,
+};
+
+static ssize_t
+show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
+}
+
+static struct device_attribute dev_attr_l3_3_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_3_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_3[] = {
+	&dev_attr_l3_3_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_3 = {
+	.name = "84a7956f-1ea4-4d0d-837f-e39a0376e38c",
+	.attrs = attrs_l3_3,
+};
+
+static ssize_t
+show_l3_4_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_4);
+}
+
+static struct device_attribute dev_attr_l3_4_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_4_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_4[] = {
+	&dev_attr_l3_4_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_4 = {
+	.name = "92b493d9-df18-4bed-be06-5cac6f2a6f5f",
+	.attrs = attrs_l3_4,
+};
+
+static ssize_t
+show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
+}
+
+static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_rasterizer_and_pixel_backend_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
+	&dev_attr_rasterizer_and_pixel_backend_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_rasterizer_and_pixel_backend = {
+	.name = "14345c35-cc46-40d0-bb04-6ed1fbb43679",
+	.attrs = attrs_rasterizer_and_pixel_backend,
+};
+
+static ssize_t
+show_sampler_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_1);
+}
+
+static struct device_attribute dev_attr_sampler_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_sampler_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_sampler_1[] = {
+	&dev_attr_sampler_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_sampler_1 = {
+	.name = "f0c6ba37-d3d3-4211-91b5-226730312a54",
+	.attrs = attrs_sampler_1,
+};
+
+static ssize_t
+show_sampler_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_2);
+}
+
+static struct device_attribute dev_attr_sampler_2_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_sampler_2_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_sampler_2[] = {
+	&dev_attr_sampler_2_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_sampler_2 = {
+	.name = "30bf3702-48cf-4bca-b412-7cf50bb2f564",
+	.attrs = attrs_sampler_2,
+};
+
+static ssize_t
+show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
+}
+
+static struct device_attribute dev_attr_tdl_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_tdl_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_tdl_1[] = {
+	&dev_attr_tdl_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_tdl_1 = {
+	.name = "238bec85-df05-44f3-b905-d166712f2451",
+	.attrs = attrs_tdl_1,
+};
+
+static ssize_t
+show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
+}
+
+static struct device_attribute dev_attr_tdl_2_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_tdl_2_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_tdl_2[] = {
+	&dev_attr_tdl_2_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_tdl_2 = {
+	.name = "24bf02cd-8693-4583-981c-c4165b33da01",
+	.attrs = attrs_tdl_2,
+};
+
+static ssize_t
+show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
+}
+
+static struct device_attribute dev_attr_compute_extra_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_extra_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_extra[] = {
+	&dev_attr_compute_extra_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_extra = {
+	.name = "8fb61ba2-2fbb-454c-a136-2dec5a8a595e",
+	.attrs = attrs_compute_extra,
+};
+
+static ssize_t
+show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
+}
+
+static struct device_attribute dev_attr_vme_pipe_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_vme_pipe_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_vme_pipe[] = {
+	&dev_attr_vme_pipe_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_vme_pipe = {
+	.name = "e1743ca0-7fc8-410b-a066-de7bbb9280b7",
+	.attrs = attrs_vme_pipe,
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+}
+
+static struct device_attribute dev_attr_test_oa_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_test_oa_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_test_oa[] = {
+	&dev_attr_test_oa_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_test_oa = {
+	.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e",
+	.attrs = attrs_test_oa,
+};
+
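+/* Advertise every metric set that has a usable MUX config by creating
+ * its sysfs group under the metrics kobject. If any creation fails, the
+ * groups created so far are removed in reverse order via the error
+ * labels below.
+ */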
+int
+i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv)
+{
+	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
+	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+	int ret = 0;
+
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+		if (ret)
+			goto error_render_basic;
+	}
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+		if (ret)
+			goto error_compute_basic;
+	}
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+		if (ret)
+			goto error_render_pipe_profile;
+	}
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+		if (ret)
+			goto error_memory_reads;
+	}
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+		if (ret)
+			goto error_memory_writes;
+	}
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+		if (ret)
+			goto error_compute_extended;
+	}
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+		if (ret)
+			goto error_compute_l3_cache;
+	}
+	if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
+		if (ret)
+			goto error_data_port_reads_coalescing;
+	}
+	if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
+		if (ret)
+			goto error_data_port_writes_coalescing;
+	}
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+		if (ret)
+			goto error_hdc_and_sf;
+	}
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+		if (ret)
+			goto error_l3_1;
+	}
+	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
+		if (ret)
+			goto error_l3_2;
+	}
+	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
+		if (ret)
+			goto error_l3_3;
+	}
+	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_4);
+		if (ret)
+			goto error_l3_4;
+	}
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+		if (ret)
+			goto error_rasterizer_and_pixel_backend;
+	}
+	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
+		if (ret)
+			goto error_sampler_1;
+	}
+	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
+		if (ret)
+			goto error_sampler_2;
+	}
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+		if (ret)
+			goto error_tdl_1;
+	}
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+		if (ret)
+			goto error_tdl_2;
+	}
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+		if (ret)
+			goto error_compute_extra;
+	}
+	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
+		if (ret)
+			goto error_vme_pipe;
+	}
+	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+		if (ret)
+			goto error_test_oa;
+	}
+
+	return 0;
+
+error_test_oa:
+	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
+error_vme_pipe:
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+error_compute_extra:
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+error_tdl_2:
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+error_tdl_1:
+	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
+error_sampler_2:
+	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
+error_sampler_1:
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+error_rasterizer_and_pixel_backend:
+	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
+error_l3_4:
+	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
+error_l3_3:
+	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
+error_l3_2:
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+error_l3_1:
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+error_hdc_and_sf:
+	if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
+error_data_port_writes_coalescing:
+	if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
+error_data_port_reads_coalescing:
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+error_compute_l3_cache:
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+error_compute_extended:
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+error_memory_writes:
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+error_memory_reads:
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+error_render_pipe_profile:
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+error_compute_basic:
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+error_render_basic:
+	return ret;
+}
+
+void
+i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv)
+{
+	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
+	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+	if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
+	if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
+	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
+	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
+	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+	if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
+	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+}
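
Note on the pattern above: the register/unregister pair relies on the kernel's goto-unwind idiom. Each sysfs group is created only when its mux config applies to the running SKU, and on failure the chain of error labels removes exactly the groups created so far, in reverse order. A minimal sketch of the same idiom, assuming two hypothetical attribute groups (group_a/group_b are illustrative; sysfs_create_group()/sysfs_remove_group() are the real kernel APIs):

	#include <linux/sysfs.h>

	static struct attribute *no_attrs[] = { NULL };
	static const struct attribute_group group_a = { .name = "a", .attrs = no_attrs };
	static const struct attribute_group group_b = { .name = "b", .attrs = no_attrs };

	static int register_groups(struct kobject *kobj)
	{
		int ret;

		ret = sysfs_create_group(kobj, &group_a);
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = sysfs_create_group(kobj, &group_b);
		if (ret)
			goto error_group_b;	/* unwind only what succeeded */

		return 0;

	error_group_b:
		sysfs_remove_group(kobj, &group_a);
		return ret;
	}

Scaling this to twenty-odd conditionally registered groups yields exactly the ladder of error labels seen in i915_perf_register_sysfs_bdw() above.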

+ 40 - 0
drivers/gpu/drm/i915/i915_oa_bdw.h

@@ -0,0 +1,40 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_BDW_H__
+#define __I915_OA_BDW_H__
+
+extern int i915_oa_n_builtin_metric_sets_bdw;
+
+extern int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv);
+
+extern int i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv);
+
+extern void i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv);
+
+#endif
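
The header exposes the four per-platform OA entry points defined by the generated .c file. A hedged sketch of how such hooks would be selected at driver init (IS_BROADWELL() is the real i915 platform-check macro; the wrapper function itself is illustrative, not the exact i915_perf.c wiring):

	#include "i915_drv.h"
	#include "i915_oa_bdw.h"

	static int hypothetical_oa_sysfs_init(struct drm_i915_private *dev_priv)
	{
		/* Pick the metric tables generated for this GPU generation. */
		if (IS_BROADWELL(dev_priv))
			return i915_perf_register_sysfs_bdw(dev_priv);

		return 0;	/* other platforms get their own generated hooks */
	}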

+ 2690 - 0
drivers/gpu/drm/i915/i915_oa_bxt.c

@@ -0,0 +1,2690 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_bxt.h"
+
+enum metric_set_id {
+	METRIC_SET_ID_RENDER_BASIC = 1,
+	METRIC_SET_ID_COMPUTE_BASIC,
+	METRIC_SET_ID_RENDER_PIPE_PROFILE,
+	METRIC_SET_ID_MEMORY_READS,
+	METRIC_SET_ID_MEMORY_WRITES,
+	METRIC_SET_ID_COMPUTE_EXTENDED,
+	METRIC_SET_ID_COMPUTE_L3_CACHE,
+	METRIC_SET_ID_HDC_AND_SF,
+	METRIC_SET_ID_L3_1,
+	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
+	METRIC_SET_ID_SAMPLER,
+	METRIC_SET_ID_TDL_1,
+	METRIC_SET_ID_TDL_2,
+	METRIC_SET_ID_COMPUTE_EXTRA,
+	METRIC_SET_ID_TEST_OA,
+};
+
+int i915_oa_n_builtin_metric_sets_bxt = 15;
+
+static const struct i915_oa_reg b_counter_config_render_basic[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_render_basic[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_render_basic_0_sku_gte_0x03[] = {
+	{ _MMIO(0x9888), 0x166c00f0 },
+	{ _MMIO(0x9888), 0x12120280 },
+	{ _MMIO(0x9888), 0x12320280 },
+	{ _MMIO(0x9888), 0x11930317 },
+	{ _MMIO(0x9888), 0x159303df },
+	{ _MMIO(0x9888), 0x3f900c00 },
+	{ _MMIO(0x9888), 0x419000a0 },
+	{ _MMIO(0x9888), 0x002d1000 },
+	{ _MMIO(0x9888), 0x062d4000 },
+	{ _MMIO(0x9888), 0x082d5000 },
+	{ _MMIO(0x9888), 0x0a2d1000 },
+	{ _MMIO(0x9888), 0x0c2e0800 },
+	{ _MMIO(0x9888), 0x0e2e5900 },
+	{ _MMIO(0x9888), 0x0a4c8000 },
+	{ _MMIO(0x9888), 0x0c4c8000 },
+	{ _MMIO(0x9888), 0x0e4c4000 },
+	{ _MMIO(0x9888), 0x064e8000 },
+	{ _MMIO(0x9888), 0x084e8000 },
+	{ _MMIO(0x9888), 0x0a4e2000 },
+	{ _MMIO(0x9888), 0x1c4f0010 },
+	{ _MMIO(0x9888), 0x0a6c0053 },
+	{ _MMIO(0x9888), 0x106c0000 },
+	{ _MMIO(0x9888), 0x1c6c0000 },
+	{ _MMIO(0x9888), 0x1a0fcc00 },
+	{ _MMIO(0x9888), 0x1c0f0002 },
+	{ _MMIO(0x9888), 0x1c2c0040 },
+	{ _MMIO(0x9888), 0x00101000 },
+	{ _MMIO(0x9888), 0x04101000 },
+	{ _MMIO(0x9888), 0x00114000 },
+	{ _MMIO(0x9888), 0x08114000 },
+	{ _MMIO(0x9888), 0x00120020 },
+	{ _MMIO(0x9888), 0x08120021 },
+	{ _MMIO(0x9888), 0x00141000 },
+	{ _MMIO(0x9888), 0x08141000 },
+	{ _MMIO(0x9888), 0x02308000 },
+	{ _MMIO(0x9888), 0x04302000 },
+	{ _MMIO(0x9888), 0x06318000 },
+	{ _MMIO(0x9888), 0x08318000 },
+	{ _MMIO(0x9888), 0x06320800 },
+	{ _MMIO(0x9888), 0x08320840 },
+	{ _MMIO(0x9888), 0x00320000 },
+	{ _MMIO(0x9888), 0x06344000 },
+	{ _MMIO(0x9888), 0x08344000 },
+	{ _MMIO(0x9888), 0x0d931831 },
+	{ _MMIO(0x9888), 0x0f939f3f },
+	{ _MMIO(0x9888), 0x01939e80 },
+	{ _MMIO(0x9888), 0x039303bc },
+	{ _MMIO(0x9888), 0x0593000e },
+	{ _MMIO(0x9888), 0x1993002a },
+	{ _MMIO(0x9888), 0x07930000 },
+	{ _MMIO(0x9888), 0x09930000 },
+	{ _MMIO(0x9888), 0x1d900177 },
+	{ _MMIO(0x9888), 0x1f900187 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x13904000 },
+	{ _MMIO(0x9888), 0x21904000 },
+	{ _MMIO(0x9888), 0x23904000 },
+	{ _MMIO(0x9888), 0x25904000 },
+	{ _MMIO(0x9888), 0x27904000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x53901110 },
+	{ _MMIO(0x9888), 0x43900423 },
+	{ _MMIO(0x9888), 0x55900111 },
+	{ _MMIO(0x9888), 0x47900c02 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900020 },
+	{ _MMIO(0x9888), 0x59901111 },
+	{ _MMIO(0x9888), 0x4b900421 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x45900821 },
+};
+
+static int
+get_render_basic_mux_config(struct drm_i915_private *dev_priv,
+			    const struct i915_oa_reg **regs,
+			    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	if (dev_priv->drm.pdev->revision >= 0x03) {
+		regs[n] = mux_config_render_basic_0_sku_gte_0x03;
+		lens[n] = ARRAY_SIZE(mux_config_render_basic_0_sku_gte_0x03);
+		n++;
+	}
+
+	return n;
+}
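
Every get_*_mux_config() helper in these files follows the same contract: it fills the caller's scratch arrays with each mux config that applies to the running device and returns how many it wrote; a return of 0 means the metric set is unavailable on this SKU, so the registration code (as in the Broadwell file above) simply skips that sysfs group. The BUILD_BUG_ON() lines are compile-time guards that the scratch arrays, sized from dev_priv->perf.oa.mux_regs, can hold every alternative. A minimal sketch of that guard, wrapped in a hypothetical function since BUILD_BUG_ON() needs function scope:

	#include <linux/bug.h>

	static void check_sizes(void)
	{
		/* Fails the build, not the boot, if the condition is true. */
		BUILD_BUG_ON(sizeof(int) > sizeof(long));
	}
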
+
+static const struct i915_oa_reg b_counter_config_compute_basic[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_compute_basic[] = {
+	{ _MMIO(0x9888), 0x104f00e0 },
+	{ _MMIO(0x9888), 0x124f1c00 },
+	{ _MMIO(0x9888), 0x39900340 },
+	{ _MMIO(0x9888), 0x3f900c00 },
+	{ _MMIO(0x9888), 0x41900000 },
+	{ _MMIO(0x9888), 0x002d5000 },
+	{ _MMIO(0x9888), 0x062d4000 },
+	{ _MMIO(0x9888), 0x082d4000 },
+	{ _MMIO(0x9888), 0x0a2d1000 },
+	{ _MMIO(0x9888), 0x0c2d5000 },
+	{ _MMIO(0x9888), 0x0e2d4000 },
+	{ _MMIO(0x9888), 0x0c2e1400 },
+	{ _MMIO(0x9888), 0x0e2e5100 },
+	{ _MMIO(0x9888), 0x102e0114 },
+	{ _MMIO(0x9888), 0x044cc000 },
+	{ _MMIO(0x9888), 0x0a4c8000 },
+	{ _MMIO(0x9888), 0x0c4c8000 },
+	{ _MMIO(0x9888), 0x0e4c4000 },
+	{ _MMIO(0x9888), 0x104c8000 },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x004ea000 },
+	{ _MMIO(0x9888), 0x064e8000 },
+	{ _MMIO(0x9888), 0x084e8000 },
+	{ _MMIO(0x9888), 0x0a4e2000 },
+	{ _MMIO(0x9888), 0x0c4ea000 },
+	{ _MMIO(0x9888), 0x0e4e8000 },
+	{ _MMIO(0x9888), 0x004f6b42 },
+	{ _MMIO(0x9888), 0x064f6200 },
+	{ _MMIO(0x9888), 0x084f4100 },
+	{ _MMIO(0x9888), 0x0a4f0061 },
+	{ _MMIO(0x9888), 0x0c4f6c4c },
+	{ _MMIO(0x9888), 0x0e4f4b00 },
+	{ _MMIO(0x9888), 0x1a4f0000 },
+	{ _MMIO(0x9888), 0x1c4f0000 },
+	{ _MMIO(0x9888), 0x180f5000 },
+	{ _MMIO(0x9888), 0x1a0f8800 },
+	{ _MMIO(0x9888), 0x1c0f08a2 },
+	{ _MMIO(0x9888), 0x182c4000 },
+	{ _MMIO(0x9888), 0x1c2c1451 },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x1a2c0010 },
+	{ _MMIO(0x9888), 0x01938000 },
+	{ _MMIO(0x9888), 0x0f938000 },
+	{ _MMIO(0x9888), 0x19938a28 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x19900177 },
+	{ _MMIO(0x9888), 0x1b900178 },
+	{ _MMIO(0x9888), 0x1d900125 },
+	{ _MMIO(0x9888), 0x1f900123 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x13904000 },
+	{ _MMIO(0x9888), 0x21904000 },
+	{ _MMIO(0x9888), 0x25904000 },
+	{ _MMIO(0x9888), 0x27904000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x53901000 },
+	{ _MMIO(0x9888), 0x43900000 },
+	{ _MMIO(0x9888), 0x55900111 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900000 },
+	{ _MMIO(0x9888), 0x45900000 },
+};
+
+static int
+get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_basic;
+	lens[n] = ARRAY_SIZE(mux_config_compute_basic);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007ffea },
+	{ _MMIO(0x2774), 0x00007ffc },
+	{ _MMIO(0x2778), 0x0007affa },
+	{ _MMIO(0x277c), 0x0000f5fd },
+	{ _MMIO(0x2780), 0x00079ffa },
+	{ _MMIO(0x2784), 0x0000f3fb },
+	{ _MMIO(0x2788), 0x0007bf7a },
+	{ _MMIO(0x278c), 0x0000f7e7 },
+	{ _MMIO(0x2790), 0x0007fefa },
+	{ _MMIO(0x2794), 0x0000f7cf },
+	{ _MMIO(0x2798), 0x00077ffa },
+	{ _MMIO(0x279c), 0x0000efdf },
+	{ _MMIO(0x27a0), 0x0006fffa },
+	{ _MMIO(0x27a4), 0x0000cfbf },
+	{ _MMIO(0x27a8), 0x0003fffa },
+	{ _MMIO(0x27ac), 0x00005f7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
+	{ _MMIO(0x9888), 0x0c2e001f },
+	{ _MMIO(0x9888), 0x0a2f0000 },
+	{ _MMIO(0x9888), 0x10186800 },
+	{ _MMIO(0x9888), 0x11810019 },
+	{ _MMIO(0x9888), 0x15810013 },
+	{ _MMIO(0x9888), 0x13820020 },
+	{ _MMIO(0x9888), 0x11830020 },
+	{ _MMIO(0x9888), 0x17840000 },
+	{ _MMIO(0x9888), 0x11860007 },
+	{ _MMIO(0x9888), 0x21860000 },
+	{ _MMIO(0x9888), 0x178703e0 },
+	{ _MMIO(0x9888), 0x0c2d8000 },
+	{ _MMIO(0x9888), 0x042d4000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x022e5400 },
+	{ _MMIO(0x9888), 0x002e0000 },
+	{ _MMIO(0x9888), 0x0e2e0080 },
+	{ _MMIO(0x9888), 0x082f0040 },
+	{ _MMIO(0x9888), 0x002f0000 },
+	{ _MMIO(0x9888), 0x06143000 },
+	{ _MMIO(0x9888), 0x06174000 },
+	{ _MMIO(0x9888), 0x06180012 },
+	{ _MMIO(0x9888), 0x00180000 },
+	{ _MMIO(0x9888), 0x0d804000 },
+	{ _MMIO(0x9888), 0x0f804000 },
+	{ _MMIO(0x9888), 0x05804000 },
+	{ _MMIO(0x9888), 0x09810200 },
+	{ _MMIO(0x9888), 0x0b810030 },
+	{ _MMIO(0x9888), 0x03810003 },
+	{ _MMIO(0x9888), 0x21819140 },
+	{ _MMIO(0x9888), 0x23819050 },
+	{ _MMIO(0x9888), 0x25810018 },
+	{ _MMIO(0x9888), 0x0b820980 },
+	{ _MMIO(0x9888), 0x03820d80 },
+	{ _MMIO(0x9888), 0x11820000 },
+	{ _MMIO(0x9888), 0x0182c000 },
+	{ _MMIO(0x9888), 0x07828000 },
+	{ _MMIO(0x9888), 0x09824000 },
+	{ _MMIO(0x9888), 0x0f828000 },
+	{ _MMIO(0x9888), 0x0d830004 },
+	{ _MMIO(0x9888), 0x0583000c },
+	{ _MMIO(0x9888), 0x0f831000 },
+	{ _MMIO(0x9888), 0x01848072 },
+	{ _MMIO(0x9888), 0x11840000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x09844000 },
+	{ _MMIO(0x9888), 0x0f848000 },
+	{ _MMIO(0x9888), 0x07860000 },
+	{ _MMIO(0x9888), 0x09860092 },
+	{ _MMIO(0x9888), 0x0f860400 },
+	{ _MMIO(0x9888), 0x01869100 },
+	{ _MMIO(0x9888), 0x0f870065 },
+	{ _MMIO(0x9888), 0x01870000 },
+	{ _MMIO(0x9888), 0x19930800 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x1b952000 },
+	{ _MMIO(0x9888), 0x1d955055 },
+	{ _MMIO(0x9888), 0x1f951455 },
+	{ _MMIO(0x9888), 0x0992a000 },
+	{ _MMIO(0x9888), 0x0f928000 },
+	{ _MMIO(0x9888), 0x1192a800 },
+	{ _MMIO(0x9888), 0x1392028a },
+	{ _MMIO(0x9888), 0x0b92a000 },
+	{ _MMIO(0x9888), 0x0d922000 },
+	{ _MMIO(0x9888), 0x13908000 },
+	{ _MMIO(0x9888), 0x21908000 },
+	{ _MMIO(0x9888), 0x23908000 },
+	{ _MMIO(0x9888), 0x25908000 },
+	{ _MMIO(0x9888), 0x27908000 },
+	{ _MMIO(0x9888), 0x29908000 },
+	{ _MMIO(0x9888), 0x2b908000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f908000 },
+	{ _MMIO(0x9888), 0x31908000 },
+	{ _MMIO(0x9888), 0x15908000 },
+	{ _MMIO(0x9888), 0x17908000 },
+	{ _MMIO(0x9888), 0x19908000 },
+	{ _MMIO(0x9888), 0x1b908000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900c01 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900863 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900061 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900000 },
+	{ _MMIO(0x9888), 0x45900c22 },
+};
+
+static int
+get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
+				   const struct i915_oa_reg **regs,
+				   int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_render_pipe_profile;
+	lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_reads[] = {
+	{ _MMIO(0x272c), 0xffffffff },
+	{ _MMIO(0x2728), 0xffffffff },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x271c), 0xffffffff },
+	{ _MMIO(0x2718), 0xffffffff },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x274c), 0x86543210 },
+	{ _MMIO(0x2748), 0x86543210 },
+	{ _MMIO(0x2744), 0x00006667 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x275c), 0x86543210 },
+	{ _MMIO(0x2758), 0x86543210 },
+	{ _MMIO(0x2754), 0x00006465 },
+	{ _MMIO(0x2750), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007f81a },
+	{ _MMIO(0x2774), 0x0000fe00 },
+	{ _MMIO(0x2778), 0x0007f82a },
+	{ _MMIO(0x277c), 0x0000fe00 },
+	{ _MMIO(0x2780), 0x0007f872 },
+	{ _MMIO(0x2784), 0x0000fe00 },
+	{ _MMIO(0x2788), 0x0007f8ba },
+	{ _MMIO(0x278c), 0x0000fe00 },
+	{ _MMIO(0x2790), 0x0007f87a },
+	{ _MMIO(0x2794), 0x0000fe00 },
+	{ _MMIO(0x2798), 0x0007f8ea },
+	{ _MMIO(0x279c), 0x0000fe00 },
+	{ _MMIO(0x27a0), 0x0007f8e2 },
+	{ _MMIO(0x27a4), 0x0000fe00 },
+	{ _MMIO(0x27a8), 0x0007f8f2 },
+	{ _MMIO(0x27ac), 0x0000fe00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_memory_reads[] = {
+	{ _MMIO(0x9888), 0x19800343 },
+	{ _MMIO(0x9888), 0x39900340 },
+	{ _MMIO(0x9888), 0x3f901000 },
+	{ _MMIO(0x9888), 0x41900003 },
+	{ _MMIO(0x9888), 0x03803180 },
+	{ _MMIO(0x9888), 0x058035e2 },
+	{ _MMIO(0x9888), 0x0780006a },
+	{ _MMIO(0x9888), 0x11800000 },
+	{ _MMIO(0x9888), 0x2181a000 },
+	{ _MMIO(0x9888), 0x2381000a },
+	{ _MMIO(0x9888), 0x1d950550 },
+	{ _MMIO(0x9888), 0x0b928000 },
+	{ _MMIO(0x9888), 0x0d92a000 },
+	{ _MMIO(0x9888), 0x0f922000 },
+	{ _MMIO(0x9888), 0x13900170 },
+	{ _MMIO(0x9888), 0x21900171 },
+	{ _MMIO(0x9888), 0x23900172 },
+	{ _MMIO(0x9888), 0x25900173 },
+	{ _MMIO(0x9888), 0x27900174 },
+	{ _MMIO(0x9888), 0x29900175 },
+	{ _MMIO(0x9888), 0x2b900176 },
+	{ _MMIO(0x9888), 0x2d900177 },
+	{ _MMIO(0x9888), 0x2f90017f },
+	{ _MMIO(0x9888), 0x31900125 },
+	{ _MMIO(0x9888), 0x15900123 },
+	{ _MMIO(0x9888), 0x17900121 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x19908000 },
+	{ _MMIO(0x9888), 0x1b908000 },
+	{ _MMIO(0x9888), 0x1d908000 },
+	{ _MMIO(0x9888), 0x1f908000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43901084 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47901080 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49901084 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b901084 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900004 },
+	{ _MMIO(0x9888), 0x45900000 },
+};
+
+static int
+get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
+			    const struct i915_oa_reg **regs,
+			    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_memory_reads;
+	lens[n] = ARRAY_SIZE(mux_config_memory_reads);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_writes[] = {
+	{ _MMIO(0x272c), 0xffffffff },
+	{ _MMIO(0x2728), 0xffffffff },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x271c), 0xffffffff },
+	{ _MMIO(0x2718), 0xffffffff },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x274c), 0x86543210 },
+	{ _MMIO(0x2748), 0x86543210 },
+	{ _MMIO(0x2744), 0x00006667 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x275c), 0x86543210 },
+	{ _MMIO(0x2758), 0x86543210 },
+	{ _MMIO(0x2754), 0x00006465 },
+	{ _MMIO(0x2750), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007f81a },
+	{ _MMIO(0x2774), 0x0000fe00 },
+	{ _MMIO(0x2778), 0x0007f82a },
+	{ _MMIO(0x277c), 0x0000fe00 },
+	{ _MMIO(0x2780), 0x0007f822 },
+	{ _MMIO(0x2784), 0x0000fe00 },
+	{ _MMIO(0x2788), 0x0007f8ba },
+	{ _MMIO(0x278c), 0x0000fe00 },
+	{ _MMIO(0x2790), 0x0007f87a },
+	{ _MMIO(0x2794), 0x0000fe00 },
+	{ _MMIO(0x2798), 0x0007f8ea },
+	{ _MMIO(0x279c), 0x0000fe00 },
+	{ _MMIO(0x27a0), 0x0007f8e2 },
+	{ _MMIO(0x27a4), 0x0000fe00 },
+	{ _MMIO(0x27a8), 0x0007f8f2 },
+	{ _MMIO(0x27ac), 0x0000fe00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_memory_writes[] = {
+	{ _MMIO(0x9888), 0x19800343 },
+	{ _MMIO(0x9888), 0x39900340 },
+	{ _MMIO(0x9888), 0x3f900000 },
+	{ _MMIO(0x9888), 0x41900080 },
+	{ _MMIO(0x9888), 0x03803180 },
+	{ _MMIO(0x9888), 0x058035e2 },
+	{ _MMIO(0x9888), 0x0780006a },
+	{ _MMIO(0x9888), 0x11800000 },
+	{ _MMIO(0x9888), 0x2181a000 },
+	{ _MMIO(0x9888), 0x2381000a },
+	{ _MMIO(0x9888), 0x1d950550 },
+	{ _MMIO(0x9888), 0x0b928000 },
+	{ _MMIO(0x9888), 0x0d92a000 },
+	{ _MMIO(0x9888), 0x0f922000 },
+	{ _MMIO(0x9888), 0x13900180 },
+	{ _MMIO(0x9888), 0x21900181 },
+	{ _MMIO(0x9888), 0x23900182 },
+	{ _MMIO(0x9888), 0x25900183 },
+	{ _MMIO(0x9888), 0x27900184 },
+	{ _MMIO(0x9888), 0x29900185 },
+	{ _MMIO(0x9888), 0x2b900186 },
+	{ _MMIO(0x9888), 0x2d900187 },
+	{ _MMIO(0x9888), 0x2f900170 },
+	{ _MMIO(0x9888), 0x31900125 },
+	{ _MMIO(0x9888), 0x15900123 },
+	{ _MMIO(0x9888), 0x17900121 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x19908000 },
+	{ _MMIO(0x9888), 0x1b908000 },
+	{ _MMIO(0x9888), 0x1d908000 },
+	{ _MMIO(0x9888), 0x1f908000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43901084 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47901080 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49901084 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b901084 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900004 },
+	{ _MMIO(0x9888), 0x45900000 },
+};
+
+static int
+get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_memory_writes;
+	lens[n] = ARRAY_SIZE(mux_config_memory_writes);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_extended[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007fc2a },
+	{ _MMIO(0x2774), 0x0000bf00 },
+	{ _MMIO(0x2778), 0x0007fc6a },
+	{ _MMIO(0x277c), 0x0000bf00 },
+	{ _MMIO(0x2780), 0x0007fc92 },
+	{ _MMIO(0x2784), 0x0000bf00 },
+	{ _MMIO(0x2788), 0x0007fca2 },
+	{ _MMIO(0x278c), 0x0000bf00 },
+	{ _MMIO(0x2790), 0x0007fc32 },
+	{ _MMIO(0x2794), 0x0000bf00 },
+	{ _MMIO(0x2798), 0x0007fc9a },
+	{ _MMIO(0x279c), 0x0000bf00 },
+	{ _MMIO(0x27a0), 0x0007fe6a },
+	{ _MMIO(0x27a4), 0x0000bf00 },
+	{ _MMIO(0x27a8), 0x0007fe7a },
+	{ _MMIO(0x27ac), 0x0000bf00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended[] = {
+	{ _MMIO(0x9888), 0x104f00e0 },
+	{ _MMIO(0x9888), 0x141c0160 },
+	{ _MMIO(0x9888), 0x161c0015 },
+	{ _MMIO(0x9888), 0x181c0120 },
+	{ _MMIO(0x9888), 0x002d5000 },
+	{ _MMIO(0x9888), 0x062d4000 },
+	{ _MMIO(0x9888), 0x082d5000 },
+	{ _MMIO(0x9888), 0x0a2d5000 },
+	{ _MMIO(0x9888), 0x0c2d5000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x0c2e5400 },
+	{ _MMIO(0x9888), 0x0e2e5515 },
+	{ _MMIO(0x9888), 0x102e0155 },
+	{ _MMIO(0x9888), 0x044cc000 },
+	{ _MMIO(0x9888), 0x0a4c8000 },
+	{ _MMIO(0x9888), 0x0c4cc000 },
+	{ _MMIO(0x9888), 0x0e4cc000 },
+	{ _MMIO(0x9888), 0x104c8000 },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x004ea000 },
+	{ _MMIO(0x9888), 0x064e8000 },
+	{ _MMIO(0x9888), 0x084ea000 },
+	{ _MMIO(0x9888), 0x0a4ea000 },
+	{ _MMIO(0x9888), 0x0c4ea000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x0e4f4b41 },
+	{ _MMIO(0x9888), 0x004f4200 },
+	{ _MMIO(0x9888), 0x024f404c },
+	{ _MMIO(0x9888), 0x1c4f0000 },
+	{ _MMIO(0x9888), 0x1a4f0000 },
+	{ _MMIO(0x9888), 0x001b4000 },
+	{ _MMIO(0x9888), 0x061b8000 },
+	{ _MMIO(0x9888), 0x081bc000 },
+	{ _MMIO(0x9888), 0x0a1bc000 },
+	{ _MMIO(0x9888), 0x0c1bc000 },
+	{ _MMIO(0x9888), 0x041bc000 },
+	{ _MMIO(0x9888), 0x001c0031 },
+	{ _MMIO(0x9888), 0x061c1900 },
+	{ _MMIO(0x9888), 0x081c1a33 },
+	{ _MMIO(0x9888), 0x0a1c1b35 },
+	{ _MMIO(0x9888), 0x0c1c3337 },
+	{ _MMIO(0x9888), 0x041c31c7 },
+	{ _MMIO(0x9888), 0x180f5000 },
+	{ _MMIO(0x9888), 0x1a0fa8aa },
+	{ _MMIO(0x9888), 0x1c0f0aaa },
+	{ _MMIO(0x9888), 0x182c8000 },
+	{ _MMIO(0x9888), 0x1c2c6aaa },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x1a2c2950 },
+	{ _MMIO(0x9888), 0x01938000 },
+	{ _MMIO(0x9888), 0x0f938000 },
+	{ _MMIO(0x9888), 0x1993aaaa },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x13904000 },
+	{ _MMIO(0x9888), 0x21904000 },
+	{ _MMIO(0x9888), 0x23904000 },
+	{ _MMIO(0x9888), 0x25904000 },
+	{ _MMIO(0x9888), 0x27904000 },
+	{ _MMIO(0x9888), 0x29904000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900420 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900400 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x45900001 },
+};
+
+static int
+get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
+				const struct i915_oa_reg **regs,
+				int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_extended;
+	lens[n] = ARRAY_SIZE(mux_config_compute_extended);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x30800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007fffa },
+	{ _MMIO(0x2774), 0x0000fefe },
+	{ _MMIO(0x2778), 0x0007fffa },
+	{ _MMIO(0x277c), 0x0000fefd },
+	{ _MMIO(0x2790), 0x0007fffa },
+	{ _MMIO(0x2794), 0x0000fbef },
+	{ _MMIO(0x2798), 0x0007fffa },
+	{ _MMIO(0x279c), 0x0000fbdf },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00101100 },
+	{ _MMIO(0xe45c), 0x00201200 },
+	{ _MMIO(0xe55c), 0x00301300 },
+	{ _MMIO(0xe65c), 0x00401400 },
+};
+
+static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
+	{ _MMIO(0x9888), 0x166c03b0 },
+	{ _MMIO(0x9888), 0x1593001e },
+	{ _MMIO(0x9888), 0x3f900c00 },
+	{ _MMIO(0x9888), 0x41900000 },
+	{ _MMIO(0x9888), 0x002d1000 },
+	{ _MMIO(0x9888), 0x062d4000 },
+	{ _MMIO(0x9888), 0x082d5000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x0c2e0400 },
+	{ _MMIO(0x9888), 0x0e2e1500 },
+	{ _MMIO(0x9888), 0x102e0140 },
+	{ _MMIO(0x9888), 0x044c4000 },
+	{ _MMIO(0x9888), 0x0a4c8000 },
+	{ _MMIO(0x9888), 0x0c4cc000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x004e2000 },
+	{ _MMIO(0x9888), 0x064e8000 },
+	{ _MMIO(0x9888), 0x084ea000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x1a4f4001 },
+	{ _MMIO(0x9888), 0x1c4f5005 },
+	{ _MMIO(0x9888), 0x006c0051 },
+	{ _MMIO(0x9888), 0x066c5000 },
+	{ _MMIO(0x9888), 0x086c5c5d },
+	{ _MMIO(0x9888), 0x0e6c5e5f },
+	{ _MMIO(0x9888), 0x106c0000 },
+	{ _MMIO(0x9888), 0x146c0000 },
+	{ _MMIO(0x9888), 0x1a6c0000 },
+	{ _MMIO(0x9888), 0x1c6c0000 },
+	{ _MMIO(0x9888), 0x180f1000 },
+	{ _MMIO(0x9888), 0x1a0fa800 },
+	{ _MMIO(0x9888), 0x1c0f0a00 },
+	{ _MMIO(0x9888), 0x182c4000 },
+	{ _MMIO(0x9888), 0x1c2c4015 },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x03931980 },
+	{ _MMIO(0x9888), 0x05930032 },
+	{ _MMIO(0x9888), 0x11930000 },
+	{ _MMIO(0x9888), 0x01938000 },
+	{ _MMIO(0x9888), 0x0f938000 },
+	{ _MMIO(0x9888), 0x1993a00a },
+	{ _MMIO(0x9888), 0x07930000 },
+	{ _MMIO(0x9888), 0x09930000 },
+	{ _MMIO(0x9888), 0x1d900177 },
+	{ _MMIO(0x9888), 0x1f900178 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x13904000 },
+	{ _MMIO(0x9888), 0x21904000 },
+	{ _MMIO(0x9888), 0x23904000 },
+	{ _MMIO(0x9888), 0x25904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x53901000 },
+	{ _MMIO(0x9888), 0x43900000 },
+	{ _MMIO(0x9888), 0x55900111 },
+	{ _MMIO(0x9888), 0x47900001 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900000 },
+	{ _MMIO(0x9888), 0x4d900000 },
+	{ _MMIO(0x9888), 0x45900400 },
+};
+
+static int
+get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
+				const struct i915_oa_reg **regs,
+				int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_l3_cache;
+	lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x10800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000fdff },
+};
+
+static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
+	{ _MMIO(0x9888), 0x104f0232 },
+	{ _MMIO(0x9888), 0x124f4640 },
+	{ _MMIO(0x9888), 0x11834400 },
+	{ _MMIO(0x9888), 0x022d4000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x0e2e0055 },
+	{ _MMIO(0x9888), 0x064c8000 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x024e8000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x024f6100 },
+	{ _MMIO(0x9888), 0x044f416b },
+	{ _MMIO(0x9888), 0x064f004b },
+	{ _MMIO(0x9888), 0x1a4f0000 },
+	{ _MMIO(0x9888), 0x1a0f02a8 },
+	{ _MMIO(0x9888), 0x1a2c5500 },
+	{ _MMIO(0x9888), 0x0f808000 },
+	{ _MMIO(0x9888), 0x25810020 },
+	{ _MMIO(0x9888), 0x0f8305c0 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x1f951000 },
+	{ _MMIO(0x9888), 0x13920200 },
+	{ _MMIO(0x9888), 0x31908000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4d900003 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x45900000 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
+			  const struct i915_oa_reg **regs,
+			  int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_hdc_and_sf;
+	lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_1_0_sku_gte_0x03[] = {
+	{ _MMIO(0x9888), 0x12643400 },
+	{ _MMIO(0x9888), 0x12653400 },
+	{ _MMIO(0x9888), 0x106c6800 },
+	{ _MMIO(0x9888), 0x126c001e },
+	{ _MMIO(0x9888), 0x166c0010 },
+	{ _MMIO(0x9888), 0x0c2d5000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x102e0154 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e0055 },
+	{ _MMIO(0x9888), 0x104c8000 },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x0c4ea000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x1c4f5500 },
+	{ _MMIO(0x9888), 0x1a4f1554 },
+	{ _MMIO(0x9888), 0x0a640024 },
+	{ _MMIO(0x9888), 0x10640000 },
+	{ _MMIO(0x9888), 0x04640000 },
+	{ _MMIO(0x9888), 0x0c650024 },
+	{ _MMIO(0x9888), 0x10650000 },
+	{ _MMIO(0x9888), 0x06650000 },
+	{ _MMIO(0x9888), 0x0c6c5327 },
+	{ _MMIO(0x9888), 0x0e6c5425 },
+	{ _MMIO(0x9888), 0x006c2a00 },
+	{ _MMIO(0x9888), 0x026c285b },
+	{ _MMIO(0x9888), 0x046c005c },
+	{ _MMIO(0x9888), 0x1c6c0000 },
+	{ _MMIO(0x9888), 0x1a6c0900 },
+	{ _MMIO(0x9888), 0x1c0f0aa0 },
+	{ _MMIO(0x9888), 0x180f4000 },
+	{ _MMIO(0x9888), 0x1a0f02aa },
+	{ _MMIO(0x9888), 0x1c2c5400 },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x1a2c5550 },
+	{ _MMIO(0x9888), 0x1993aa00 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900421 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900420 },
+	{ _MMIO(0x9888), 0x45900021 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+};
+
+static const struct i915_oa_reg mux_config_l3_1_0_sku_lt_0x03[] = {
+	{ _MMIO(0x9888), 0x14640340 },
+	{ _MMIO(0x9888), 0x14650340 },
+	{ _MMIO(0x9888), 0x106c6800 },
+	{ _MMIO(0x9888), 0x126c001e },
+	{ _MMIO(0x9888), 0x166c0010 },
+	{ _MMIO(0x9888), 0x0c2d5000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x102e0154 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e0055 },
+	{ _MMIO(0x9888), 0x104c8000 },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x0c4ea000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x1c4f5500 },
+	{ _MMIO(0x9888), 0x1a4f1554 },
+	{ _MMIO(0x9888), 0x04642400 },
+	{ _MMIO(0x9888), 0x22640000 },
+	{ _MMIO(0x9888), 0x1a640000 },
+	{ _MMIO(0x9888), 0x06650024 },
+	{ _MMIO(0x9888), 0x22650000 },
+	{ _MMIO(0x9888), 0x1c650000 },
+	{ _MMIO(0x9888), 0x0c6c5327 },
+	{ _MMIO(0x9888), 0x0e6c5425 },
+	{ _MMIO(0x9888), 0x006c2a00 },
+	{ _MMIO(0x9888), 0x026c285b },
+	{ _MMIO(0x9888), 0x046c005c },
+	{ _MMIO(0x9888), 0x1c6c0000 },
+	{ _MMIO(0x9888), 0x1a6c0900 },
+	{ _MMIO(0x9888), 0x1c0f0aa0 },
+	{ _MMIO(0x9888), 0x180f4000 },
+	{ _MMIO(0x9888), 0x1a0f02aa },
+	{ _MMIO(0x9888), 0x1c2c5400 },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x1a2c5550 },
+	{ _MMIO(0x9888), 0x1993aa00 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900421 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900420 },
+	{ _MMIO(0x9888), 0x45900021 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+};
+
+static int
+get_l3_1_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
+
+	if (dev_priv->drm.pdev->revision >= 0x03) {
+		regs[n] = mux_config_l3_1_0_sku_gte_0x03;
+		lens[n] = ARRAY_SIZE(mux_config_l3_1_0_sku_gte_0x03);
+		n++;
+	}
+	if (dev_priv->drm.pdev->revision < 0x03) {
+		regs[n] = mux_config_l3_1_0_sku_lt_0x03;
+		lens[n] = ARRAY_SIZE(mux_config_l3_1_0_sku_lt_0x03);
+		n++;
+	}
+
+	return n;
+}
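
This getter is the one in the BXT file with two mutually exclusive configs, gated on the PCI revision; whichever branch matches, the caller receives (regs[i], lens[i]) pairs to program in order. A hedged sketch of how such pairs would be written out (struct i915_oa_reg and I915_WRITE() are the real i915 interfaces of this era; apply_mux_configs() itself is illustrative, as the actual write path lives in i915_perf.c):

	#include "i915_drv.h"

	static void apply_mux_configs(struct drm_i915_private *dev_priv,
				      const struct i915_oa_reg **regs,
				      const int *lens, int n)
	{
		int i, j;

		/* Each table entry is one MMIO write: (address, value). */
		for (i = 0; i < n; i++)
			for (j = 0; j < lens[i]; j++)
				I915_WRITE(regs[i][j].addr, regs[i][j].value);
	}
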
+
+static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x30800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000efff },
+	{ _MMIO(0x2778), 0x00006000 },
+	{ _MMIO(0x277c), 0x0000f3ff },
+};
+
+static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0x9888), 0x102d7800 },
+	{ _MMIO(0x9888), 0x122d79e0 },
+	{ _MMIO(0x9888), 0x0c2f0004 },
+	{ _MMIO(0x9888), 0x100e3800 },
+	{ _MMIO(0x9888), 0x180f0005 },
+	{ _MMIO(0x9888), 0x002d0940 },
+	{ _MMIO(0x9888), 0x022d802f },
+	{ _MMIO(0x9888), 0x042d4013 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x0e2e0050 },
+	{ _MMIO(0x9888), 0x022f0010 },
+	{ _MMIO(0x9888), 0x002f0000 },
+	{ _MMIO(0x9888), 0x084c8000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x044e8000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x040e0480 },
+	{ _MMIO(0x9888), 0x000e0000 },
+	{ _MMIO(0x9888), 0x060f0027 },
+	{ _MMIO(0x9888), 0x100f0000 },
+	{ _MMIO(0x9888), 0x1a0f0040 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x439014a0 },
+	{ _MMIO(0x9888), 0x459000a4 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900001 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
+					    const struct i915_oa_reg **regs,
+					    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_rasterizer_and_pixel_backend;
+	lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_sampler[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x70800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x0000c000 },
+	{ _MMIO(0x2774), 0x0000e7ff },
+	{ _MMIO(0x2778), 0x00003000 },
+	{ _MMIO(0x277c), 0x0000f9ff },
+	{ _MMIO(0x2780), 0x00000c00 },
+	{ _MMIO(0x2784), 0x0000fe7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_sampler[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_sampler[] = {
+	{ _MMIO(0x9888), 0x121300a0 },
+	{ _MMIO(0x9888), 0x141600ab },
+	{ _MMIO(0x9888), 0x123300a0 },
+	{ _MMIO(0x9888), 0x143600ab },
+	{ _MMIO(0x9888), 0x125300a0 },
+	{ _MMIO(0x9888), 0x145600ab },
+	{ _MMIO(0x9888), 0x0c2d4000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x102e01a0 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e0065 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x084c4000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x0e4e8000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x044e2000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x1c0f0800 },
+	{ _MMIO(0x9888), 0x180f4000 },
+	{ _MMIO(0x9888), 0x1a0f023f },
+	{ _MMIO(0x9888), 0x1e2c0003 },
+	{ _MMIO(0x9888), 0x1a2cc030 },
+	{ _MMIO(0x9888), 0x04132180 },
+	{ _MMIO(0x9888), 0x02130000 },
+	{ _MMIO(0x9888), 0x0c148000 },
+	{ _MMIO(0x9888), 0x0e142000 },
+	{ _MMIO(0x9888), 0x04148000 },
+	{ _MMIO(0x9888), 0x1e150140 },
+	{ _MMIO(0x9888), 0x1c150040 },
+	{ _MMIO(0x9888), 0x0c163000 },
+	{ _MMIO(0x9888), 0x0e160068 },
+	{ _MMIO(0x9888), 0x10160000 },
+	{ _MMIO(0x9888), 0x18160000 },
+	{ _MMIO(0x9888), 0x0a164000 },
+	{ _MMIO(0x9888), 0x04330043 },
+	{ _MMIO(0x9888), 0x02330000 },
+	{ _MMIO(0x9888), 0x0234a000 },
+	{ _MMIO(0x9888), 0x04342000 },
+	{ _MMIO(0x9888), 0x1c350015 },
+	{ _MMIO(0x9888), 0x02363460 },
+	{ _MMIO(0x9888), 0x10360000 },
+	{ _MMIO(0x9888), 0x04360000 },
+	{ _MMIO(0x9888), 0x06360000 },
+	{ _MMIO(0x9888), 0x08364000 },
+	{ _MMIO(0x9888), 0x06530043 },
+	{ _MMIO(0x9888), 0x02530000 },
+	{ _MMIO(0x9888), 0x0e548000 },
+	{ _MMIO(0x9888), 0x00548000 },
+	{ _MMIO(0x9888), 0x06542000 },
+	{ _MMIO(0x9888), 0x1e550400 },
+	{ _MMIO(0x9888), 0x1a552000 },
+	{ _MMIO(0x9888), 0x1c550100 },
+	{ _MMIO(0x9888), 0x0e563000 },
+	{ _MMIO(0x9888), 0x00563400 },
+	{ _MMIO(0x9888), 0x10560000 },
+	{ _MMIO(0x9888), 0x18560000 },
+	{ _MMIO(0x9888), 0x02560000 },
+	{ _MMIO(0x9888), 0x0c564000 },
+	{ _MMIO(0x9888), 0x1993a800 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b9014a0 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900820 },
+	{ _MMIO(0x9888), 0x45901022 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+};
+
+static int
+get_sampler_mux_config(struct drm_i915_private *dev_priv,
+		       const struct i915_oa_reg **regs,
+		       int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_sampler;
+	lens[n] = ARRAY_SIZE(mux_config_sampler);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_tdl_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x00007fff },
+	{ _MMIO(0x2778), 0x00000000 },
+	{ _MMIO(0x277c), 0x00009fff },
+	{ _MMIO(0x2780), 0x00000002 },
+	{ _MMIO(0x2784), 0x0000efff },
+	{ _MMIO(0x2788), 0x00000000 },
+	{ _MMIO(0x278c), 0x0000f3ff },
+	{ _MMIO(0x2790), 0x00000002 },
+	{ _MMIO(0x2794), 0x0000fdff },
+	{ _MMIO(0x2798), 0x00000000 },
+	{ _MMIO(0x279c), 0x0000fe7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_tdl_1[] = {
+	{ _MMIO(0x9888), 0x141a0000 },
+	{ _MMIO(0x9888), 0x143a0000 },
+	{ _MMIO(0x9888), 0x145a0000 },
+	{ _MMIO(0x9888), 0x0c2d4000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x102e0150 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e006a },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064c4000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x0c4e8000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024e2000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x1c0f0bc0 },
+	{ _MMIO(0x9888), 0x180f4000 },
+	{ _MMIO(0x9888), 0x1a0f0302 },
+	{ _MMIO(0x9888), 0x1e2c0003 },
+	{ _MMIO(0x9888), 0x1a2c00f0 },
+	{ _MMIO(0x9888), 0x021a3080 },
+	{ _MMIO(0x9888), 0x041a31e5 },
+	{ _MMIO(0x9888), 0x02148000 },
+	{ _MMIO(0x9888), 0x0414a000 },
+	{ _MMIO(0x9888), 0x1c150054 },
+	{ _MMIO(0x9888), 0x06168000 },
+	{ _MMIO(0x9888), 0x08168000 },
+	{ _MMIO(0x9888), 0x0a168000 },
+	{ _MMIO(0x9888), 0x0c3a3280 },
+	{ _MMIO(0x9888), 0x0e3a0063 },
+	{ _MMIO(0x9888), 0x063a0061 },
+	{ _MMIO(0x9888), 0x023a0000 },
+	{ _MMIO(0x9888), 0x0c348000 },
+	{ _MMIO(0x9888), 0x0e342000 },
+	{ _MMIO(0x9888), 0x06342000 },
+	{ _MMIO(0x9888), 0x1e350140 },
+	{ _MMIO(0x9888), 0x1c350100 },
+	{ _MMIO(0x9888), 0x18360028 },
+	{ _MMIO(0x9888), 0x0c368000 },
+	{ _MMIO(0x9888), 0x0e5a3080 },
+	{ _MMIO(0x9888), 0x005a3280 },
+	{ _MMIO(0x9888), 0x025a0063 },
+	{ _MMIO(0x9888), 0x0e548000 },
+	{ _MMIO(0x9888), 0x00548000 },
+	{ _MMIO(0x9888), 0x02542000 },
+	{ _MMIO(0x9888), 0x1e550400 },
+	{ _MMIO(0x9888), 0x1a552000 },
+	{ _MMIO(0x9888), 0x1c550001 },
+	{ _MMIO(0x9888), 0x18560080 },
+	{ _MMIO(0x9888), 0x02568000 },
+	{ _MMIO(0x9888), 0x04568000 },
+	{ _MMIO(0x9888), 0x1993a800 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900420 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900000 },
+	{ _MMIO(0x9888), 0x45901084 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900001 },
+};
+
+static int
+get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
+		     const struct i915_oa_reg **regs,
+		     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_tdl_1;
+	lens[n] = ARRAY_SIZE(mux_config_tdl_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_tdl_2[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_tdl_2[] = {
+	{ _MMIO(0x9888), 0x141a026b },
+	{ _MMIO(0x9888), 0x143a0173 },
+	{ _MMIO(0x9888), 0x145a026b },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e0069 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x180f6000 },
+	{ _MMIO(0x9888), 0x1a0f030a },
+	{ _MMIO(0x9888), 0x1a2c03c0 },
+	{ _MMIO(0x9888), 0x041a37e7 },
+	{ _MMIO(0x9888), 0x021a0000 },
+	{ _MMIO(0x9888), 0x0414a000 },
+	{ _MMIO(0x9888), 0x1c150050 },
+	{ _MMIO(0x9888), 0x08168000 },
+	{ _MMIO(0x9888), 0x0a168000 },
+	{ _MMIO(0x9888), 0x003a3380 },
+	{ _MMIO(0x9888), 0x063a006f },
+	{ _MMIO(0x9888), 0x023a0000 },
+	{ _MMIO(0x9888), 0x00348000 },
+	{ _MMIO(0x9888), 0x06342000 },
+	{ _MMIO(0x9888), 0x1a352000 },
+	{ _MMIO(0x9888), 0x1c350100 },
+	{ _MMIO(0x9888), 0x02368000 },
+	{ _MMIO(0x9888), 0x0c368000 },
+	{ _MMIO(0x9888), 0x025a37e7 },
+	{ _MMIO(0x9888), 0x0254a000 },
+	{ _MMIO(0x9888), 0x1c550005 },
+	{ _MMIO(0x9888), 0x04568000 },
+	{ _MMIO(0x9888), 0x06568000 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900020 },
+	{ _MMIO(0x9888), 0x45901080 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900001 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
+		     const struct i915_oa_reg **regs,
+		     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_tdl_2;
+	lens[n] = ARRAY_SIZE(mux_config_tdl_2);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_extra[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
+	{ _MMIO(0xe458), 0x00001000 },
+	{ _MMIO(0xe558), 0x00003002 },
+	{ _MMIO(0xe658), 0x00005004 },
+	{ _MMIO(0xe758), 0x00011010 },
+	{ _MMIO(0xe45c), 0x00050012 },
+	{ _MMIO(0xe55c), 0x00052051 },
+	{ _MMIO(0xe65c), 0x00000008 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extra[] = {
+	{ _MMIO(0x9888), 0x141a001f },
+	{ _MMIO(0x9888), 0x143a001f },
+	{ _MMIO(0x9888), 0x145a001f },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x0e2e0094 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x1a0f00e0 },
+	{ _MMIO(0x9888), 0x1a2c0c00 },
+	{ _MMIO(0x9888), 0x061a0063 },
+	{ _MMIO(0x9888), 0x021a0000 },
+	{ _MMIO(0x9888), 0x06142000 },
+	{ _MMIO(0x9888), 0x1c150100 },
+	{ _MMIO(0x9888), 0x0c168000 },
+	{ _MMIO(0x9888), 0x043a3180 },
+	{ _MMIO(0x9888), 0x023a0000 },
+	{ _MMIO(0x9888), 0x04348000 },
+	{ _MMIO(0x9888), 0x1c350040 },
+	{ _MMIO(0x9888), 0x0a368000 },
+	{ _MMIO(0x9888), 0x045a0063 },
+	{ _MMIO(0x9888), 0x025a0000 },
+	{ _MMIO(0x9888), 0x04542000 },
+	{ _MMIO(0x9888), 0x1c550010 },
+	{ _MMIO(0x9888), 0x08568000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x45900400 },
+	{ _MMIO(0x9888), 0x47900004 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_extra;
+	lens[n] = ARRAY_SIZE(mux_config_compute_extra);
+	n++;
+
+	return n;
+}
+
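+/* The 0x2710/0x2720, 0x2740 and 0x2770+ offsets below appear to map to
+ * the OASTARTTRIG, OAREPORTTRIG and OACEC (boolean counter) register
+ * ranges; as with the MUX tables, the values come from the generator.
+ */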
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2770), 0x00000004 },
+	{ _MMIO(0x2774), 0x00000000 },
+	{ _MMIO(0x2778), 0x00000003 },
+	{ _MMIO(0x277c), 0x00000000 },
+	{ _MMIO(0x2780), 0x00000007 },
+	{ _MMIO(0x2784), 0x00000000 },
+	{ _MMIO(0x2788), 0x00100002 },
+	{ _MMIO(0x278c), 0x0000fff7 },
+	{ _MMIO(0x2790), 0x00100002 },
+	{ _MMIO(0x2794), 0x0000ffcf },
+	{ _MMIO(0x2798), 0x00100082 },
+	{ _MMIO(0x279c), 0x0000ffef },
+	{ _MMIO(0x27a0), 0x001000c2 },
+	{ _MMIO(0x27a4), 0x0000ffe7 },
+	{ _MMIO(0x27a8), 0x00100001 },
+	{ _MMIO(0x27ac), 0x0000ffe7 },
+};
+
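+/* Intentionally empty: the TEST_OA set programs no flex-EU counters. */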
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+	{ _MMIO(0x9888), 0x19800000 },
+	{ _MMIO(0x9888), 0x07800063 },
+	{ _MMIO(0x9888), 0x11800000 },
+	{ _MMIO(0x9888), 0x23810008 },
+	{ _MMIO(0x9888), 0x1d950400 },
+	{ _MMIO(0x9888), 0x0f922000 },
+	{ _MMIO(0x9888), 0x1f908000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_test_oa_mux_config(struct drm_i915_private *dev_priv,
+		       const struct i915_oa_reg **regs,
+		       int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_test_oa;
+	lens[n] = ARRAY_SIZE(mux_config_test_oa);
+	n++;
+
+	return n;
+}
+
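+/*
+ * Resolve dev_priv->perf.oa.metrics_set into the MUX, boolean-counter
+ * and flex-EU register lists defined above. Returns 0 on success,
+ * -EINVAL when no suitable MUX config exists for the device, and
+ * -ENODEV for an unrecognized metric set id.
+ */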
+int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv)
+{
+	dev_priv->perf.oa.n_mux_configs = 0;
+	dev_priv->perf.oa.b_counter_regs = NULL;
+	dev_priv->perf.oa.b_counter_regs_len = 0;
+	dev_priv->perf.oa.flex_regs = NULL;
+	dev_priv->perf.oa.flex_regs_len = 0;
+
+	switch (dev_priv->perf.oa.metrics_set) {
+	case METRIC_SET_ID_RENDER_BASIC:
+		dev_priv->perf.oa.n_mux_configs =
+			get_render_basic_mux_config(dev_priv,
+						    dev_priv->perf.oa.mux_regs,
+						    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_render_basic;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_render_basic);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_render_basic;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_render_basic);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_BASIC:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_basic_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_basic;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_basic);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_basic;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_basic);
+
+		return 0;
+	case METRIC_SET_ID_RENDER_PIPE_PROFILE:
+		dev_priv->perf.oa.n_mux_configs =
+			get_render_pipe_profile_mux_config(dev_priv,
+							   dev_priv->perf.oa.mux_regs,
+							   dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_render_pipe_profile;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_render_pipe_profile);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_render_pipe_profile;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_render_pipe_profile);
+
+		return 0;
+	case METRIC_SET_ID_MEMORY_READS:
+		dev_priv->perf.oa.n_mux_configs =
+			get_memory_reads_mux_config(dev_priv,
+						    dev_priv->perf.oa.mux_regs,
+						    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_memory_reads;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_memory_reads);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_memory_reads;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_memory_reads);
+
+		return 0;
+	case METRIC_SET_ID_MEMORY_WRITES:
+		dev_priv->perf.oa.n_mux_configs =
+			get_memory_writes_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_memory_writes;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_memory_writes);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_memory_writes;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_memory_writes);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_EXTENDED:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_extended_mux_config(dev_priv,
+							dev_priv->perf.oa.mux_regs,
+							dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_extended;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_extended);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_extended;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_extended);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_L3_CACHE:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_l3_cache_mux_config(dev_priv,
+							dev_priv->perf.oa.mux_regs,
+							dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_l3_cache;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_l3_cache);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_l3_cache;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_l3_cache);
+
+		return 0;
+	case METRIC_SET_ID_HDC_AND_SF:
+		dev_priv->perf.oa.n_mux_configs =
+			get_hdc_and_sf_mux_config(dev_priv,
+						  dev_priv->perf.oa.mux_regs,
+						  dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_hdc_and_sf;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_hdc_and_sf);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_hdc_and_sf;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_hdc_and_sf);
+
+		return 0;
+	case METRIC_SET_ID_L3_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_1_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_1);
+
+		return 0;
+	case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
+		dev_priv->perf.oa.n_mux_configs =
+			get_rasterizer_and_pixel_backend_mux_config(dev_priv,
+								    dev_priv->perf.oa.mux_regs,
+								    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_rasterizer_and_pixel_backend;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_rasterizer_and_pixel_backend;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
+
+		return 0;
+	case METRIC_SET_ID_SAMPLER:
+		dev_priv->perf.oa.n_mux_configs =
+			get_sampler_mux_config(dev_priv,
+					       dev_priv->perf.oa.mux_regs,
+					       dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_sampler;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_sampler);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_sampler;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_sampler);
+
+		return 0;
+	case METRIC_SET_ID_TDL_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_tdl_1_mux_config(dev_priv,
+					     dev_priv->perf.oa.mux_regs,
+					     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_tdl_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_tdl_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_tdl_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_tdl_1);
+
+		return 0;
+	case METRIC_SET_ID_TDL_2:
+		dev_priv->perf.oa.n_mux_configs =
+			get_tdl_2_mux_config(dev_priv,
+					     dev_priv->perf.oa.mux_regs,
+					     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_tdl_2;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_tdl_2);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_tdl_2;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_tdl_2);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_EXTRA:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_extra_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_extra;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_extra);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_extra;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_extra);
+
+		return 0;
+	case METRIC_SET_ID_TEST_OA:
+		dev_priv->perf.oa.n_mux_configs =
+			get_test_oa_mux_config(dev_priv,
+					       dev_priv->perf.oa.mux_regs,
+					       dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
+
+			/* Return -EINVAL: *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_test_oa;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_test_oa);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_test_oa;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_test_oa);
+
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
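+/*
+ * sysfs plumbing: each metric set gets a directory named by its GUID
+ * under the driver's metrics kobject, containing a read-only "id" file
+ * that reports the METRIC_SET_ID_* value userspace passes back to
+ * select the config.
+ */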
+static ssize_t
+show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
+}
+
+static struct device_attribute dev_attr_render_basic_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_render_basic_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_render_basic[] = {
+	&dev_attr_render_basic_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_render_basic = {
+	.name = "22b9519a-e9ba-4c41-8b54-f4f8ca14fa0a",
+	.attrs =  attrs_render_basic,
+};
+
+static ssize_t
+show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
+}
+
+static struct device_attribute dev_attr_compute_basic_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_basic_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_basic[] = {
+	&dev_attr_compute_basic_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_basic = {
+	.name = "012d72cf-82a9-4d25-8ddf-74076fd30797",
+	.attrs =  attrs_compute_basic,
+};
+
+static ssize_t
+show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
+}
+
+static struct device_attribute dev_attr_render_pipe_profile_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_render_pipe_profile_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_render_pipe_profile[] = {
+	&dev_attr_render_pipe_profile_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_render_pipe_profile = {
+	.name = "ce416533-e49e-4211-80af-ec513590a914",
+	.attrs =  attrs_render_pipe_profile,
+};
+
+static ssize_t
+show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
+}
+
+static struct device_attribute dev_attr_memory_reads_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_memory_reads_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_memory_reads[] = {
+	&dev_attr_memory_reads_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_memory_reads = {
+	.name = "398e2452-18d7-42d0-b241-e4d0a9148ada",
+	.attrs =  attrs_memory_reads,
+};
+
+static ssize_t
+show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
+}
+
+static struct device_attribute dev_attr_memory_writes_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_memory_writes_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_memory_writes[] = {
+	&dev_attr_memory_writes_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_memory_writes = {
+	.name = "d324a0d6-7269-4847-a5c2-6f71ddc7fed5",
+	.attrs =  attrs_memory_writes,
+};
+
+static ssize_t
+show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
+}
+
+static struct device_attribute dev_attr_compute_extended_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_extended_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_extended[] = {
+	&dev_attr_compute_extended_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_extended = {
+	.name = "caf3596a-7bb1-4dec-b3b3-2a080d283b49",
+	.attrs =  attrs_compute_extended,
+};
+
+static ssize_t
+show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
+}
+
+static struct device_attribute dev_attr_compute_l3_cache_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_l3_cache_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_l3_cache[] = {
+	&dev_attr_compute_l3_cache_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_l3_cache = {
+	.name = "49b956e2-d5b9-47e0-9d8a-cee5e8cec527",
+	.attrs =  attrs_compute_l3_cache,
+};
+
+static ssize_t
+show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
+}
+
+static struct device_attribute dev_attr_hdc_and_sf_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_hdc_and_sf_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_hdc_and_sf[] = {
+	&dev_attr_hdc_and_sf_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_hdc_and_sf = {
+	.name = "f64ef50a-bdba-4b35-8f09-203c13d8ee5a",
+	.attrs =  attrs_hdc_and_sf,
+};
+
+static ssize_t
+show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
+}
+
+static struct device_attribute dev_attr_l3_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_1[] = {
+	&dev_attr_l3_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_1 = {
+	.name = "00ad5a41-7eab-4f7a-9103-49d411c67219",
+	.attrs =  attrs_l3_1,
+};
+
+static ssize_t
+show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
+}
+
+static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_rasterizer_and_pixel_backend_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
+	&dev_attr_rasterizer_and_pixel_backend_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_rasterizer_and_pixel_backend = {
+	.name = "46dc44ca-491c-4cc1-a951-e7b3e62bf02b",
+	.attrs =  attrs_rasterizer_and_pixel_backend,
+};
+
+static ssize_t
+show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
+}
+
+static struct device_attribute dev_attr_sampler_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_sampler_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_sampler[] = {
+	&dev_attr_sampler_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_sampler = {
+	.name = "8364e2a8-af63-40af-b0d5-42969a255654",
+	.attrs =  attrs_sampler,
+};
+
+static ssize_t
+show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
+}
+
+static struct device_attribute dev_attr_tdl_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_tdl_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_tdl_1[] = {
+	&dev_attr_tdl_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_tdl_1 = {
+	.name = "175c8092-cb25-4d1e-8dc7-b4fdd39e2d92",
+	.attrs =  attrs_tdl_1,
+};
+
+static ssize_t
+show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
+}
+
+static struct device_attribute dev_attr_tdl_2_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_tdl_2_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_tdl_2[] = {
+	&dev_attr_tdl_2_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_tdl_2 = {
+	.name = "d260f03f-b34d-4b49-a44e-436819117332",
+	.attrs =  attrs_tdl_2,
+};
+
+static ssize_t
+show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
+}
+
+static struct device_attribute dev_attr_compute_extra_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_extra_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_extra[] = {
+	&dev_attr_compute_extra_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_extra = {
+	.name = "fa6ecf21-2cb8-4d0b-9308-6e4a7b4ca87a",
+	.attrs =  attrs_compute_extra,
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+}
+
+static struct device_attribute dev_attr_test_oa_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_test_oa_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_test_oa[] = {
+	&dev_attr_test_oa_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_test_oa = {
+	.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612",
+	.attrs =  attrs_test_oa,
+};
+
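+/*
+ * Create one sysfs group per metric set that has a usable MUX config;
+ * on failure, the goto ladder removes the groups already created, in
+ * reverse order.
+ */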
+int
+i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv)
+{
+	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
+	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+	int ret = 0;
+
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+		if (ret)
+			goto error_render_basic;
+	}
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+		if (ret)
+			goto error_compute_basic;
+	}
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+		if (ret)
+			goto error_render_pipe_profile;
+	}
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+		if (ret)
+			goto error_memory_reads;
+	}
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+		if (ret)
+			goto error_memory_writes;
+	}
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+		if (ret)
+			goto error_compute_extended;
+	}
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+		if (ret)
+			goto error_compute_l3_cache;
+	}
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+		if (ret)
+			goto error_hdc_and_sf;
+	}
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+		if (ret)
+			goto error_l3_1;
+	}
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+		if (ret)
+			goto error_rasterizer_and_pixel_backend;
+	}
+	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
+		if (ret)
+			goto error_sampler;
+	}
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+		if (ret)
+			goto error_tdl_1;
+	}
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+		if (ret)
+			goto error_tdl_2;
+	}
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+		if (ret)
+			goto error_compute_extra;
+	}
+	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+		if (ret)
+			goto error_test_oa;
+	}
+
+	return 0;
+
+error_test_oa:
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+error_compute_extra:
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+error_tdl_2:
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+error_tdl_1:
+	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
+error_sampler:
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+error_rasterizer_and_pixel_backend:
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+error_l3_1:
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+error_hdc_and_sf:
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+error_compute_l3_cache:
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+error_compute_extended:
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+error_memory_writes:
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+error_memory_reads:
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+error_render_pipe_profile:
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+error_compute_basic:
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+error_render_basic:
+	return ret;
+}
+
+void
+i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv)
+{
+	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
+	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+}

+ 40 - 0
drivers/gpu/drm/i915/i915_oa_bxt.h

@@ -0,0 +1,40 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_BXT_H__
+#define __I915_OA_BXT_H__
+
+extern int i915_oa_n_builtin_metric_sets_bxt;
+
+extern int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv);
+
+extern int i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv);
+
+extern void i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv);
+
+#endif
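
Both generated files follow the same contract: a select hook that resolves dev_priv->perf.oa.metrics_set into register lists, plus sysfs register/unregister hooks. Below is a minimal sketch of how per-platform glue might dispatch to them; the helper name and the chv symbol (assumed by symmetry with i915_oa_bxt.h, since this excerpt does not reach the chv declarations) are illustrative, not part of this patch.

/* Illustrative glue only, assuming the kernel's existing IS_BROXTON()/
 * IS_CHERRYVIEW() platform checks; the real wiring lives in
 * i915_perf.c and is not shown in this diff.
 */
static int perf_register_platform_sysfs(struct drm_i915_private *dev_priv)
{
	if (IS_BROXTON(dev_priv))
		return i915_perf_register_sysfs_bxt(dev_priv);
	if (IS_CHERRYVIEW(dev_priv))
		return i915_perf_register_sysfs_chv(dev_priv);
	return 0; /* platform without generated OA configs */
}
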

+ 2873 - 0
drivers/gpu/drm/i915/i915_oa_chv.c

@@ -0,0 +1,2873 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_chv.h"
+
+enum metric_set_id {
+	METRIC_SET_ID_RENDER_BASIC = 1,
+	METRIC_SET_ID_COMPUTE_BASIC,
+	METRIC_SET_ID_RENDER_PIPE_PROFILE,
+	METRIC_SET_ID_HDC_AND_SF,
+	METRIC_SET_ID_L3_1,
+	METRIC_SET_ID_L3_2,
+	METRIC_SET_ID_L3_3,
+	METRIC_SET_ID_L3_4,
+	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
+	METRIC_SET_ID_SAMPLER_1,
+	METRIC_SET_ID_SAMPLER_2,
+	METRIC_SET_ID_TDL_1,
+	METRIC_SET_ID_TDL_2,
+	METRIC_SET_ID_TEST_OA,
+};
+
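+/* One entry per metric_set_id value above; presumably kept in sync by
+ * the generator. */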
+int i915_oa_n_builtin_metric_sets_chv = 14;
+
+static const struct i915_oa_reg b_counter_config_render_basic[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_render_basic[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_render_basic[] = {
+	{ _MMIO(0x9888), 0x59800000 },
+	{ _MMIO(0x9888), 0x59800001 },
+	{ _MMIO(0x9888), 0x285a0006 },
+	{ _MMIO(0x9888), 0x2c110014 },
+	{ _MMIO(0x9888), 0x2e110000 },
+	{ _MMIO(0x9888), 0x2c310014 },
+	{ _MMIO(0x9888), 0x2e310000 },
+	{ _MMIO(0x9888), 0x2b8303df },
+	{ _MMIO(0x9888), 0x3580024f },
+	{ _MMIO(0x9888), 0x00580888 },
+	{ _MMIO(0x9888), 0x1e5a0015 },
+	{ _MMIO(0x9888), 0x205a0014 },
+	{ _MMIO(0x9888), 0x045a0000 },
+	{ _MMIO(0x9888), 0x025a0000 },
+	{ _MMIO(0x9888), 0x02180500 },
+	{ _MMIO(0x9888), 0x00190555 },
+	{ _MMIO(0x9888), 0x021d0500 },
+	{ _MMIO(0x9888), 0x021f0a00 },
+	{ _MMIO(0x9888), 0x00380444 },
+	{ _MMIO(0x9888), 0x02390500 },
+	{ _MMIO(0x9888), 0x003a0666 },
+	{ _MMIO(0x9888), 0x00100111 },
+	{ _MMIO(0x9888), 0x06110030 },
+	{ _MMIO(0x9888), 0x0a110031 },
+	{ _MMIO(0x9888), 0x0e110046 },
+	{ _MMIO(0x9888), 0x04110000 },
+	{ _MMIO(0x9888), 0x00110000 },
+	{ _MMIO(0x9888), 0x00130111 },
+	{ _MMIO(0x9888), 0x00300444 },
+	{ _MMIO(0x9888), 0x08310030 },
+	{ _MMIO(0x9888), 0x0c310031 },
+	{ _MMIO(0x9888), 0x10310046 },
+	{ _MMIO(0x9888), 0x04310000 },
+	{ _MMIO(0x9888), 0x00310000 },
+	{ _MMIO(0x9888), 0x00330444 },
+	{ _MMIO(0x9888), 0x038a0a00 },
+	{ _MMIO(0x9888), 0x018b0fff },
+	{ _MMIO(0x9888), 0x038b0a00 },
+	{ _MMIO(0x9888), 0x01855000 },
+	{ _MMIO(0x9888), 0x03850055 },
+	{ _MMIO(0x9888), 0x13830021 },
+	{ _MMIO(0x9888), 0x15830020 },
+	{ _MMIO(0x9888), 0x1783002f },
+	{ _MMIO(0x9888), 0x1983002e },
+	{ _MMIO(0x9888), 0x1b83002d },
+	{ _MMIO(0x9888), 0x1d83002c },
+	{ _MMIO(0x9888), 0x05830000 },
+	{ _MMIO(0x9888), 0x01840555 },
+	{ _MMIO(0x9888), 0x03840500 },
+	{ _MMIO(0x9888), 0x23800074 },
+	{ _MMIO(0x9888), 0x2580007d },
+	{ _MMIO(0x9888), 0x05800000 },
+	{ _MMIO(0x9888), 0x01805000 },
+	{ _MMIO(0x9888), 0x03800055 },
+	{ _MMIO(0x9888), 0x01865000 },
+	{ _MMIO(0x9888), 0x03860055 },
+	{ _MMIO(0x9888), 0x01875000 },
+	{ _MMIO(0x9888), 0x03870055 },
+	{ _MMIO(0x9888), 0x418000aa },
+	{ _MMIO(0x9888), 0x4380000a },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x4780000a },
+	{ _MMIO(0x9888), 0x49800000 },
+	{ _MMIO(0x9888), 0x4b800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x55800000 },
+	{ _MMIO(0x9888), 0x57800000 },
+	{ _MMIO(0x9888), 0x59800000 },
+};
+
+static int
+get_render_basic_mux_config(struct drm_i915_private *dev_priv,
+			    const struct i915_oa_reg **regs,
+			    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_render_basic;
+	lens[n] = ARRAY_SIZE(mux_config_render_basic);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_basic[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_compute_basic[] = {
+	{ _MMIO(0x9888), 0x59800000 },
+	{ _MMIO(0x9888), 0x59800001 },
+	{ _MMIO(0x9888), 0x2e5800e0 },
+	{ _MMIO(0x9888), 0x2e3800e0 },
+	{ _MMIO(0x9888), 0x3580024f },
+	{ _MMIO(0x9888), 0x3d800140 },
+	{ _MMIO(0x9888), 0x08580042 },
+	{ _MMIO(0x9888), 0x0c580040 },
+	{ _MMIO(0x9888), 0x1058004c },
+	{ _MMIO(0x9888), 0x1458004b },
+	{ _MMIO(0x9888), 0x04580000 },
+	{ _MMIO(0x9888), 0x00580000 },
+	{ _MMIO(0x9888), 0x00195555 },
+	{ _MMIO(0x9888), 0x06380042 },
+	{ _MMIO(0x9888), 0x0a380040 },
+	{ _MMIO(0x9888), 0x0e38004c },
+	{ _MMIO(0x9888), 0x1238004b },
+	{ _MMIO(0x9888), 0x04380000 },
+	{ _MMIO(0x9888), 0x00384444 },
+	{ _MMIO(0x9888), 0x003a5555 },
+	{ _MMIO(0x9888), 0x018bffff },
+	{ _MMIO(0x9888), 0x01845555 },
+	{ _MMIO(0x9888), 0x17800074 },
+	{ _MMIO(0x9888), 0x1980007d },
+	{ _MMIO(0x9888), 0x1b80007c },
+	{ _MMIO(0x9888), 0x1d8000b6 },
+	{ _MMIO(0x9888), 0x1f8000b7 },
+	{ _MMIO(0x9888), 0x05800000 },
+	{ _MMIO(0x9888), 0x03800000 },
+	{ _MMIO(0x9888), 0x418000aa },
+	{ _MMIO(0x9888), 0x438000aa },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x47800000 },
+	{ _MMIO(0x9888), 0x4980012a },
+	{ _MMIO(0x9888), 0x4b80012a },
+	{ _MMIO(0x9888), 0x4d80012a },
+	{ _MMIO(0x9888), 0x4f80012a },
+	{ _MMIO(0x9888), 0x518001ce },
+	{ _MMIO(0x9888), 0x538001ce },
+	{ _MMIO(0x9888), 0x5580000e },
+	{ _MMIO(0x9888), 0x59800000 },
+};
+
+static int
+get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_basic;
+	lens[n] = ARRAY_SIZE(mux_config_compute_basic);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007ffea },
+	{ _MMIO(0x2774), 0x00007ffc },
+	{ _MMIO(0x2778), 0x0007affa },
+	{ _MMIO(0x277c), 0x0000f5fd },
+	{ _MMIO(0x2780), 0x00079ffa },
+	{ _MMIO(0x2784), 0x0000f3fb },
+	{ _MMIO(0x2788), 0x0007bf7a },
+	{ _MMIO(0x278c), 0x0000f7e7 },
+	{ _MMIO(0x2790), 0x0007fefa },
+	{ _MMIO(0x2794), 0x0000f7cf },
+	{ _MMIO(0x2798), 0x00077ffa },
+	{ _MMIO(0x279c), 0x0000efdf },
+	{ _MMIO(0x27a0), 0x0006fffa },
+	{ _MMIO(0x27a4), 0x0000cfbf },
+	{ _MMIO(0x27a8), 0x0003fffa },
+	{ _MMIO(0x27ac), 0x00005f7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
+	{ _MMIO(0x9888), 0x59800000 },
+	{ _MMIO(0x9888), 0x59800001 },
+	{ _MMIO(0x9888), 0x261e0000 },
+	{ _MMIO(0x9888), 0x281f000f },
+	{ _MMIO(0x9888), 0x2817001a },
+	{ _MMIO(0x9888), 0x2791001f },
+	{ _MMIO(0x9888), 0x27880019 },
+	{ _MMIO(0x9888), 0x2d890000 },
+	{ _MMIO(0x9888), 0x278a0007 },
+	{ _MMIO(0x9888), 0x298d001f },
+	{ _MMIO(0x9888), 0x278e0020 },
+	{ _MMIO(0x9888), 0x2b8f0012 },
+	{ _MMIO(0x9888), 0x29900000 },
+	{ _MMIO(0x9888), 0x00184000 },
+	{ _MMIO(0x9888), 0x02181000 },
+	{ _MMIO(0x9888), 0x02194000 },
+	{ _MMIO(0x9888), 0x141e0002 },
+	{ _MMIO(0x9888), 0x041e0000 },
+	{ _MMIO(0x9888), 0x001e0000 },
+	{ _MMIO(0x9888), 0x221f0015 },
+	{ _MMIO(0x9888), 0x041f0000 },
+	{ _MMIO(0x9888), 0x001f4000 },
+	{ _MMIO(0x9888), 0x021f0000 },
+	{ _MMIO(0x9888), 0x023a8000 },
+	{ _MMIO(0x9888), 0x0213c000 },
+	{ _MMIO(0x9888), 0x02164000 },
+	{ _MMIO(0x9888), 0x24170012 },
+	{ _MMIO(0x9888), 0x04170000 },
+	{ _MMIO(0x9888), 0x07910005 },
+	{ _MMIO(0x9888), 0x05910000 },
+	{ _MMIO(0x9888), 0x01911500 },
+	{ _MMIO(0x9888), 0x03910501 },
+	{ _MMIO(0x9888), 0x0d880002 },
+	{ _MMIO(0x9888), 0x1d880003 },
+	{ _MMIO(0x9888), 0x05880000 },
+	{ _MMIO(0x9888), 0x0b890032 },
+	{ _MMIO(0x9888), 0x1b890031 },
+	{ _MMIO(0x9888), 0x05890000 },
+	{ _MMIO(0x9888), 0x01890040 },
+	{ _MMIO(0x9888), 0x03890040 },
+	{ _MMIO(0x9888), 0x098a0000 },
+	{ _MMIO(0x9888), 0x198a0004 },
+	{ _MMIO(0x9888), 0x058a0000 },
+	{ _MMIO(0x9888), 0x018a8050 },
+	{ _MMIO(0x9888), 0x038a2050 },
+	{ _MMIO(0x9888), 0x018b95a9 },
+	{ _MMIO(0x9888), 0x038be5a9 },
+	{ _MMIO(0x9888), 0x018c1500 },
+	{ _MMIO(0x9888), 0x038c0501 },
+	{ _MMIO(0x9888), 0x178d0015 },
+	{ _MMIO(0x9888), 0x058d0000 },
+	{ _MMIO(0x9888), 0x138e0004 },
+	{ _MMIO(0x9888), 0x218e000c },
+	{ _MMIO(0x9888), 0x058e0000 },
+	{ _MMIO(0x9888), 0x018e0500 },
+	{ _MMIO(0x9888), 0x038e0101 },
+	{ _MMIO(0x9888), 0x0f8f0027 },
+	{ _MMIO(0x9888), 0x058f0000 },
+	{ _MMIO(0x9888), 0x018f0000 },
+	{ _MMIO(0x9888), 0x038f0001 },
+	{ _MMIO(0x9888), 0x11900013 },
+	{ _MMIO(0x9888), 0x1f900017 },
+	{ _MMIO(0x9888), 0x05900000 },
+	{ _MMIO(0x9888), 0x01900100 },
+	{ _MMIO(0x9888), 0x03900001 },
+	{ _MMIO(0x9888), 0x01845555 },
+	{ _MMIO(0x9888), 0x03845555 },
+	{ _MMIO(0x9888), 0x418000aa },
+	{ _MMIO(0x9888), 0x438000aa },
+	{ _MMIO(0x9888), 0x458000aa },
+	{ _MMIO(0x9888), 0x478000aa },
+	{ _MMIO(0x9888), 0x4980018c },
+	{ _MMIO(0x9888), 0x4b80014b },
+	{ _MMIO(0x9888), 0x4d800128 },
+	{ _MMIO(0x9888), 0x4f80012a },
+	{ _MMIO(0x9888), 0x51800187 },
+	{ _MMIO(0x9888), 0x5380014b },
+	{ _MMIO(0x9888), 0x55800149 },
+	{ _MMIO(0x9888), 0x5780010a },
+	{ _MMIO(0x9888), 0x59800000 },
+};
+
+static int
+get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
+				   const struct i915_oa_reg **regs,
+				   int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_render_pipe_profile;
+	lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x10800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000fff7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
+	{ _MMIO(0x9888), 0x105c0232 },
+	{ _MMIO(0x9888), 0x10580232 },
+	{ _MMIO(0x9888), 0x10380232 },
+	{ _MMIO(0x9888), 0x10dc0232 },
+	{ _MMIO(0x9888), 0x10d80232 },
+	{ _MMIO(0x9888), 0x10b80232 },
+	{ _MMIO(0x9888), 0x118e4400 },
+	{ _MMIO(0x9888), 0x025c6080 },
+	{ _MMIO(0x9888), 0x045c004b },
+	{ _MMIO(0x9888), 0x005c8000 },
+	{ _MMIO(0x9888), 0x00582080 },
+	{ _MMIO(0x9888), 0x0258004b },
+	{ _MMIO(0x9888), 0x025b4000 },
+	{ _MMIO(0x9888), 0x045b4000 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f00aa },
+	{ _MMIO(0x9888), 0x04386080 },
+	{ _MMIO(0x9888), 0x0638404b },
+	{ _MMIO(0x9888), 0x02384000 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a380000 },
+	{ _MMIO(0x9888), 0x0c380000 },
+	{ _MMIO(0x9888), 0x00398000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x0cdc25c1 },
+	{ _MMIO(0x9888), 0x0adcc000 },
+	{ _MMIO(0x9888), 0x0ad825c1 },
+	{ _MMIO(0x9888), 0x18db4000 },
+	{ _MMIO(0x9888), 0x1adb0001 },
+	{ _MMIO(0x9888), 0x0e9f8000 },
+	{ _MMIO(0x9888), 0x109f02aa },
+	{ _MMIO(0x9888), 0x0eb825c1 },
+	{ _MMIO(0x9888), 0x18b80154 },
+	{ _MMIO(0x9888), 0x0ab9a000 },
+	{ _MMIO(0x9888), 0x0cb9a000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x0d88c000 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x258baa05 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x198c5400 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x098dc000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x098e05c0 },
+	{ _MMIO(0x9888), 0x058e0000 },
+	{ _MMIO(0x9888), 0x198f0020 },
+	{ _MMIO(0x9888), 0x2185aa0a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x19835000 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x09848000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x19808000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x51800040 },
+	{ _MMIO(0x9888), 0x43800400 },
+	{ _MMIO(0x9888), 0x45800800 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800c62 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f801042 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x418014a4 },
+};
+
+static int
+get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
+			  const struct i915_oa_reg **regs,
+			  int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_hdc_and_sf;
+	lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_1[] = {
+	{ _MMIO(0x9888), 0x10bf03da },
+	{ _MMIO(0x9888), 0x14bf0001 },
+	{ _MMIO(0x9888), 0x12980340 },
+	{ _MMIO(0x9888), 0x12990340 },
+	{ _MMIO(0x9888), 0x0cbf1187 },
+	{ _MMIO(0x9888), 0x0ebf1205 },
+	{ _MMIO(0x9888), 0x00bf0500 },
+	{ _MMIO(0x9888), 0x02bf042b },
+	{ _MMIO(0x9888), 0x04bf002c },
+	{ _MMIO(0x9888), 0x0cdac000 },
+	{ _MMIO(0x9888), 0x0edac000 },
+	{ _MMIO(0x9888), 0x00da8000 },
+	{ _MMIO(0x9888), 0x02dac000 },
+	{ _MMIO(0x9888), 0x04da4000 },
+	{ _MMIO(0x9888), 0x04983400 },
+	{ _MMIO(0x9888), 0x10980000 },
+	{ _MMIO(0x9888), 0x06990034 },
+	{ _MMIO(0x9888), 0x10990000 },
+	{ _MMIO(0x9888), 0x0c9dc000 },
+	{ _MMIO(0x9888), 0x0e9dc000 },
+	{ _MMIO(0x9888), 0x009d8000 },
+	{ _MMIO(0x9888), 0x029dc000 },
+	{ _MMIO(0x9888), 0x049d4000 },
+	{ _MMIO(0x9888), 0x109f02a8 },
+	{ _MMIO(0x9888), 0x0c9fa000 },
+	{ _MMIO(0x9888), 0x0e9f00ba },
+	{ _MMIO(0x9888), 0x0cb88000 },
+	{ _MMIO(0x9888), 0x0cb95000 },
+	{ _MMIO(0x9888), 0x0eb95000 },
+	{ _MMIO(0x9888), 0x00b94000 },
+	{ _MMIO(0x9888), 0x02b95000 },
+	{ _MMIO(0x9888), 0x04b91000 },
+	{ _MMIO(0x9888), 0x06b92000 },
+	{ _MMIO(0x9888), 0x0cba4000 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x258b800a },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b5500 },
+	{ _MMIO(0x9888), 0x198c4000 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x018d8000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2185800a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x1b830154 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x47800000 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800060 },
+};
+
+static int
+get_l3_1_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_1;
+	lens[n] = ARRAY_SIZE(mux_config_l3_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_2[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_2[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_2[] = {
+	{ _MMIO(0x9888), 0x103f03da },
+	{ _MMIO(0x9888), 0x143f0001 },
+	{ _MMIO(0x9888), 0x12180340 },
+	{ _MMIO(0x9888), 0x12190340 },
+	{ _MMIO(0x9888), 0x0c3f1187 },
+	{ _MMIO(0x9888), 0x0e3f1205 },
+	{ _MMIO(0x9888), 0x003f0500 },
+	{ _MMIO(0x9888), 0x023f042b },
+	{ _MMIO(0x9888), 0x043f002c },
+	{ _MMIO(0x9888), 0x0c5ac000 },
+	{ _MMIO(0x9888), 0x0e5ac000 },
+	{ _MMIO(0x9888), 0x005a8000 },
+	{ _MMIO(0x9888), 0x025ac000 },
+	{ _MMIO(0x9888), 0x045a4000 },
+	{ _MMIO(0x9888), 0x04183400 },
+	{ _MMIO(0x9888), 0x10180000 },
+	{ _MMIO(0x9888), 0x06190034 },
+	{ _MMIO(0x9888), 0x10190000 },
+	{ _MMIO(0x9888), 0x0c1dc000 },
+	{ _MMIO(0x9888), 0x0e1dc000 },
+	{ _MMIO(0x9888), 0x001d8000 },
+	{ _MMIO(0x9888), 0x021dc000 },
+	{ _MMIO(0x9888), 0x041d4000 },
+	{ _MMIO(0x9888), 0x101f02a8 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f00ba },
+	{ _MMIO(0x9888), 0x0c388000 },
+	{ _MMIO(0x9888), 0x0c395000 },
+	{ _MMIO(0x9888), 0x0e395000 },
+	{ _MMIO(0x9888), 0x00394000 },
+	{ _MMIO(0x9888), 0x02395000 },
+	{ _MMIO(0x9888), 0x04391000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x0c3a4000 },
+	{ _MMIO(0x9888), 0x1b8aa800 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x258b4005 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x2185800a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x1b830154 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x45800000 },
+	{ _MMIO(0x9888), 0x47800000 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800060 },
+};
+
+static int
+get_l3_2_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_2;
+	lens[n] = ARRAY_SIZE(mux_config_l3_2);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_3[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_3[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_3[] = {
+	{ _MMIO(0x9888), 0x121b0340 },
+	{ _MMIO(0x9888), 0x103f0274 },
+	{ _MMIO(0x9888), 0x123f0000 },
+	{ _MMIO(0x9888), 0x129b0340 },
+	{ _MMIO(0x9888), 0x10bf0274 },
+	{ _MMIO(0x9888), 0x12bf0000 },
+	{ _MMIO(0x9888), 0x041b3400 },
+	{ _MMIO(0x9888), 0x101b0000 },
+	{ _MMIO(0x9888), 0x045c8000 },
+	{ _MMIO(0x9888), 0x0a3d4000 },
+	{ _MMIO(0x9888), 0x003f0080 },
+	{ _MMIO(0x9888), 0x023f0793 },
+	{ _MMIO(0x9888), 0x043f0014 },
+	{ _MMIO(0x9888), 0x04588000 },
+	{ _MMIO(0x9888), 0x005a8000 },
+	{ _MMIO(0x9888), 0x025ac000 },
+	{ _MMIO(0x9888), 0x045a4000 },
+	{ _MMIO(0x9888), 0x0a5b4000 },
+	{ _MMIO(0x9888), 0x001d8000 },
+	{ _MMIO(0x9888), 0x021dc000 },
+	{ _MMIO(0x9888), 0x041d4000 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f002a },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x00394000 },
+	{ _MMIO(0x9888), 0x02395000 },
+	{ _MMIO(0x9888), 0x04399000 },
+	{ _MMIO(0x9888), 0x069b0034 },
+	{ _MMIO(0x9888), 0x109b0000 },
+	{ _MMIO(0x9888), 0x06dc4000 },
+	{ _MMIO(0x9888), 0x0cbd4000 },
+	{ _MMIO(0x9888), 0x0cbf0981 },
+	{ _MMIO(0x9888), 0x0ebf0a0f },
+	{ _MMIO(0x9888), 0x06d84000 },
+	{ _MMIO(0x9888), 0x0cdac000 },
+	{ _MMIO(0x9888), 0x0edac000 },
+	{ _MMIO(0x9888), 0x0cdb4000 },
+	{ _MMIO(0x9888), 0x0c9dc000 },
+	{ _MMIO(0x9888), 0x0e9dc000 },
+	{ _MMIO(0x9888), 0x109f02a8 },
+	{ _MMIO(0x9888), 0x0e9f0080 },
+	{ _MMIO(0x9888), 0x0cb84000 },
+	{ _MMIO(0x9888), 0x0cb95000 },
+	{ _MMIO(0x9888), 0x0eb95000 },
+	{ _MMIO(0x9888), 0x06b92000 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x258b8009 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x198c4000 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2185800a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x1b830154 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x45800c00 },
+	{ _MMIO(0x9888), 0x47800c63 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f8014a5 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800045 },
+};
+
+static int
+get_l3_3_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_3;
+	lens[n] = ARRAY_SIZE(mux_config_l3_3);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_4[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_4[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_4[] = {
+	{ _MMIO(0x9888), 0x121a0340 },
+	{ _MMIO(0x9888), 0x103f0017 },
+	{ _MMIO(0x9888), 0x123f0020 },
+	{ _MMIO(0x9888), 0x129a0340 },
+	{ _MMIO(0x9888), 0x10bf0017 },
+	{ _MMIO(0x9888), 0x12bf0020 },
+	{ _MMIO(0x9888), 0x041a3400 },
+	{ _MMIO(0x9888), 0x101a0000 },
+	{ _MMIO(0x9888), 0x043b8000 },
+	{ _MMIO(0x9888), 0x0a3e0010 },
+	{ _MMIO(0x9888), 0x003f0200 },
+	{ _MMIO(0x9888), 0x023f0113 },
+	{ _MMIO(0x9888), 0x043f0014 },
+	{ _MMIO(0x9888), 0x02592000 },
+	{ _MMIO(0x9888), 0x005a8000 },
+	{ _MMIO(0x9888), 0x025ac000 },
+	{ _MMIO(0x9888), 0x045a4000 },
+	{ _MMIO(0x9888), 0x0a1c8000 },
+	{ _MMIO(0x9888), 0x001d8000 },
+	{ _MMIO(0x9888), 0x021dc000 },
+	{ _MMIO(0x9888), 0x041d4000 },
+	{ _MMIO(0x9888), 0x0a1e8000 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f001a },
+	{ _MMIO(0x9888), 0x00394000 },
+	{ _MMIO(0x9888), 0x02395000 },
+	{ _MMIO(0x9888), 0x04391000 },
+	{ _MMIO(0x9888), 0x069a0034 },
+	{ _MMIO(0x9888), 0x109a0000 },
+	{ _MMIO(0x9888), 0x06bb4000 },
+	{ _MMIO(0x9888), 0x0abe0040 },
+	{ _MMIO(0x9888), 0x0cbf0984 },
+	{ _MMIO(0x9888), 0x0ebf0a02 },
+	{ _MMIO(0x9888), 0x02d94000 },
+	{ _MMIO(0x9888), 0x0cdac000 },
+	{ _MMIO(0x9888), 0x0edac000 },
+	{ _MMIO(0x9888), 0x0c9c0400 },
+	{ _MMIO(0x9888), 0x0c9dc000 },
+	{ _MMIO(0x9888), 0x0e9dc000 },
+	{ _MMIO(0x9888), 0x0c9e0400 },
+	{ _MMIO(0x9888), 0x109f02a8 },
+	{ _MMIO(0x9888), 0x0e9f0040 },
+	{ _MMIO(0x9888), 0x0cb95000 },
+	{ _MMIO(0x9888), 0x0eb95000 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x258b8009 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x198c4000 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2185800a },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x1b830154 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x45800800 },
+	{ _MMIO(0x9888), 0x47800842 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f801084 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800044 },
+};
+
+static int
+get_l3_4_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_4;
+	lens[n] = ARRAY_SIZE(mux_config_l3_4);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00006000 },
+	{ _MMIO(0x2774), 0x0000f3ff },
+	{ _MMIO(0x2778), 0x00001800 },
+	{ _MMIO(0x277c), 0x0000fcff },
+	{ _MMIO(0x2780), 0x00000600 },
+	{ _MMIO(0x2784), 0x0000ff3f },
+	{ _MMIO(0x2788), 0x00000180 },
+	{ _MMIO(0x278c), 0x0000ffcf },
+	{ _MMIO(0x2790), 0x00000060 },
+	{ _MMIO(0x2794), 0x0000fff3 },
+	{ _MMIO(0x2798), 0x00000018 },
+	{ _MMIO(0x279c), 0x0000fffc },
+};
+
+static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0x9888), 0x143b000e },
+	{ _MMIO(0x9888), 0x043c55c0 },
+	{ _MMIO(0x9888), 0x0a1e0280 },
+	{ _MMIO(0x9888), 0x0c1e0408 },
+	{ _MMIO(0x9888), 0x10390000 },
+	{ _MMIO(0x9888), 0x12397a1f },
+	{ _MMIO(0x9888), 0x14bb000e },
+	{ _MMIO(0x9888), 0x04bc5000 },
+	{ _MMIO(0x9888), 0x0a9e0296 },
+	{ _MMIO(0x9888), 0x0c9e0008 },
+	{ _MMIO(0x9888), 0x10b90000 },
+	{ _MMIO(0x9888), 0x12b97a1f },
+	{ _MMIO(0x9888), 0x063b0042 },
+	{ _MMIO(0x9888), 0x103b0000 },
+	{ _MMIO(0x9888), 0x083c0000 },
+	{ _MMIO(0x9888), 0x0a3e0040 },
+	{ _MMIO(0x9888), 0x043f8000 },
+	{ _MMIO(0x9888), 0x02594000 },
+	{ _MMIO(0x9888), 0x045a8000 },
+	{ _MMIO(0x9888), 0x0c1c0400 },
+	{ _MMIO(0x9888), 0x041d8000 },
+	{ _MMIO(0x9888), 0x081e02c0 },
+	{ _MMIO(0x9888), 0x0e1e0000 },
+	{ _MMIO(0x9888), 0x0c1fa800 },
+	{ _MMIO(0x9888), 0x0e1f0260 },
+	{ _MMIO(0x9888), 0x101f0014 },
+	{ _MMIO(0x9888), 0x003905e0 },
+	{ _MMIO(0x9888), 0x06390bc0 },
+	{ _MMIO(0x9888), 0x02390018 },
+	{ _MMIO(0x9888), 0x04394000 },
+	{ _MMIO(0x9888), 0x04bb0042 },
+	{ _MMIO(0x9888), 0x10bb0000 },
+	{ _MMIO(0x9888), 0x02bc05c0 },
+	{ _MMIO(0x9888), 0x08bc0000 },
+	{ _MMIO(0x9888), 0x0abe0004 },
+	{ _MMIO(0x9888), 0x02bf8000 },
+	{ _MMIO(0x9888), 0x02d91000 },
+	{ _MMIO(0x9888), 0x02da8000 },
+	{ _MMIO(0x9888), 0x089c8000 },
+	{ _MMIO(0x9888), 0x029d8000 },
+	{ _MMIO(0x9888), 0x089e8000 },
+	{ _MMIO(0x9888), 0x0e9e0000 },
+	{ _MMIO(0x9888), 0x0e9fa806 },
+	{ _MMIO(0x9888), 0x109f0142 },
+	{ _MMIO(0x9888), 0x08b90617 },
+	{ _MMIO(0x9888), 0x0ab90be0 },
+	{ _MMIO(0x9888), 0x02b94000 },
+	{ _MMIO(0x9888), 0x0d88f000 },
+	{ _MMIO(0x9888), 0x0f88000c },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x018a8000 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x1b8a2800 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x238b52a0 },
+	{ _MMIO(0x9888), 0x258b6a95 },
+	{ _MMIO(0x9888), 0x278b0029 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c1500 },
+	{ _MMIO(0x9888), 0x1b8c0014 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x038d8000 },
+	{ _MMIO(0x9888), 0x058d2000 },
+	{ _MMIO(0x9888), 0x1f85aa80 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x01834000 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0184c000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1180c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x4d800444 },
+	{ _MMIO(0x9888), 0x3d800000 },
+	{ _MMIO(0x9888), 0x4f804000 },
+	{ _MMIO(0x9888), 0x43801080 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800084 },
+	{ _MMIO(0x9888), 0x53800044 },
+	{ _MMIO(0x9888), 0x47801080 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x3f800000 },
+	{ _MMIO(0x9888), 0x41800840 },
+};
+
+static int
+get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
+					    const struct i915_oa_reg **regs,
+					    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_rasterizer_and_pixel_backend;
+	lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_sampler_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x70800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x0000c000 },
+	{ _MMIO(0x2774), 0x0000e7ff },
+	{ _MMIO(0x2778), 0x00003000 },
+	{ _MMIO(0x277c), 0x0000f9ff },
+	{ _MMIO(0x2780), 0x00000c00 },
+	{ _MMIO(0x2784), 0x0000fe7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_sampler_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_sampler_1[] = {
+	{ _MMIO(0x9888), 0x18921400 },
+	{ _MMIO(0x9888), 0x149500ab },
+	{ _MMIO(0x9888), 0x18b21400 },
+	{ _MMIO(0x9888), 0x14b500ab },
+	{ _MMIO(0x9888), 0x18d21400 },
+	{ _MMIO(0x9888), 0x14d500ab },
+	{ _MMIO(0x9888), 0x0cdc8000 },
+	{ _MMIO(0x9888), 0x0edc4000 },
+	{ _MMIO(0x9888), 0x02dcc000 },
+	{ _MMIO(0x9888), 0x04dcc000 },
+	{ _MMIO(0x9888), 0x1abd00a0 },
+	{ _MMIO(0x9888), 0x0abd8000 },
+	{ _MMIO(0x9888), 0x0cd88000 },
+	{ _MMIO(0x9888), 0x0ed84000 },
+	{ _MMIO(0x9888), 0x04d88000 },
+	{ _MMIO(0x9888), 0x1adb0050 },
+	{ _MMIO(0x9888), 0x04db8000 },
+	{ _MMIO(0x9888), 0x06db8000 },
+	{ _MMIO(0x9888), 0x08db8000 },
+	{ _MMIO(0x9888), 0x0adb4000 },
+	{ _MMIO(0x9888), 0x109f02a0 },
+	{ _MMIO(0x9888), 0x0c9fa000 },
+	{ _MMIO(0x9888), 0x0e9f00aa },
+	{ _MMIO(0x9888), 0x18b82500 },
+	{ _MMIO(0x9888), 0x02b88000 },
+	{ _MMIO(0x9888), 0x04b84000 },
+	{ _MMIO(0x9888), 0x06b84000 },
+	{ _MMIO(0x9888), 0x08b84000 },
+	{ _MMIO(0x9888), 0x0ab84000 },
+	{ _MMIO(0x9888), 0x0cb88000 },
+	{ _MMIO(0x9888), 0x0cb98000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x00b98000 },
+	{ _MMIO(0x9888), 0x02b9a000 },
+	{ _MMIO(0x9888), 0x04b9a000 },
+	{ _MMIO(0x9888), 0x06b92000 },
+	{ _MMIO(0x9888), 0x1aba0200 },
+	{ _MMIO(0x9888), 0x02ba8000 },
+	{ _MMIO(0x9888), 0x0cba8000 },
+	{ _MMIO(0x9888), 0x04908000 },
+	{ _MMIO(0x9888), 0x04918000 },
+	{ _MMIO(0x9888), 0x04927300 },
+	{ _MMIO(0x9888), 0x10920000 },
+	{ _MMIO(0x9888), 0x1893000a },
+	{ _MMIO(0x9888), 0x0a934000 },
+	{ _MMIO(0x9888), 0x0a946000 },
+	{ _MMIO(0x9888), 0x0c959000 },
+	{ _MMIO(0x9888), 0x0e950098 },
+	{ _MMIO(0x9888), 0x10950000 },
+	{ _MMIO(0x9888), 0x04b04000 },
+	{ _MMIO(0x9888), 0x04b14000 },
+	{ _MMIO(0x9888), 0x04b20073 },
+	{ _MMIO(0x9888), 0x10b20000 },
+	{ _MMIO(0x9888), 0x04b38000 },
+	{ _MMIO(0x9888), 0x06b38000 },
+	{ _MMIO(0x9888), 0x08b34000 },
+	{ _MMIO(0x9888), 0x04b4c000 },
+	{ _MMIO(0x9888), 0x02b59890 },
+	{ _MMIO(0x9888), 0x10b50000 },
+	{ _MMIO(0x9888), 0x06d04000 },
+	{ _MMIO(0x9888), 0x06d14000 },
+	{ _MMIO(0x9888), 0x06d20073 },
+	{ _MMIO(0x9888), 0x10d20000 },
+	{ _MMIO(0x9888), 0x18d30020 },
+	{ _MMIO(0x9888), 0x02d38000 },
+	{ _MMIO(0x9888), 0x0cd34000 },
+	{ _MMIO(0x9888), 0x0ad48000 },
+	{ _MMIO(0x9888), 0x04d42000 },
+	{ _MMIO(0x9888), 0x0ed59000 },
+	{ _MMIO(0x9888), 0x00d59800 },
+	{ _MMIO(0x9888), 0x10d50000 },
+	{ _MMIO(0x9888), 0x0f88000e },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b5500 },
+	{ _MMIO(0x9888), 0x258b000a },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x0d8d8000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x018d8000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x2185000a },
+	{ _MMIO(0x9888), 0x1b830150 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d848000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d808000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47801021 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800c64 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800c02 },
+};
+
+static int
+get_sampler_1_mux_config(struct drm_i915_private *dev_priv,
+			 const struct i915_oa_reg **regs,
+			 int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_sampler_1;
+	lens[n] = ARRAY_SIZE(mux_config_sampler_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_sampler_2[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x70800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x0000c000 },
+	{ _MMIO(0x2774), 0x0000e7ff },
+	{ _MMIO(0x2778), 0x00003000 },
+	{ _MMIO(0x277c), 0x0000f9ff },
+	{ _MMIO(0x2780), 0x00000c00 },
+	{ _MMIO(0x2784), 0x0000fe7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_sampler_2[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_sampler_2[] = {
+	{ _MMIO(0x9888), 0x18121400 },
+	{ _MMIO(0x9888), 0x141500ab },
+	{ _MMIO(0x9888), 0x18321400 },
+	{ _MMIO(0x9888), 0x143500ab },
+	{ _MMIO(0x9888), 0x18521400 },
+	{ _MMIO(0x9888), 0x145500ab },
+	{ _MMIO(0x9888), 0x0c5c8000 },
+	{ _MMIO(0x9888), 0x0e5c4000 },
+	{ _MMIO(0x9888), 0x025cc000 },
+	{ _MMIO(0x9888), 0x045cc000 },
+	{ _MMIO(0x9888), 0x1a3d00a0 },
+	{ _MMIO(0x9888), 0x0a3d8000 },
+	{ _MMIO(0x9888), 0x0c588000 },
+	{ _MMIO(0x9888), 0x0e584000 },
+	{ _MMIO(0x9888), 0x04588000 },
+	{ _MMIO(0x9888), 0x1a5b0050 },
+	{ _MMIO(0x9888), 0x045b8000 },
+	{ _MMIO(0x9888), 0x065b8000 },
+	{ _MMIO(0x9888), 0x085b8000 },
+	{ _MMIO(0x9888), 0x0a5b4000 },
+	{ _MMIO(0x9888), 0x101f02a0 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f00aa },
+	{ _MMIO(0x9888), 0x18382500 },
+	{ _MMIO(0x9888), 0x02388000 },
+	{ _MMIO(0x9888), 0x04384000 },
+	{ _MMIO(0x9888), 0x06384000 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0c388000 },
+	{ _MMIO(0x9888), 0x0c398000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x00398000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x1a3a0200 },
+	{ _MMIO(0x9888), 0x023a8000 },
+	{ _MMIO(0x9888), 0x0c3a8000 },
+	{ _MMIO(0x9888), 0x04108000 },
+	{ _MMIO(0x9888), 0x04118000 },
+	{ _MMIO(0x9888), 0x04127300 },
+	{ _MMIO(0x9888), 0x10120000 },
+	{ _MMIO(0x9888), 0x1813000a },
+	{ _MMIO(0x9888), 0x0a134000 },
+	{ _MMIO(0x9888), 0x0a146000 },
+	{ _MMIO(0x9888), 0x0c159000 },
+	{ _MMIO(0x9888), 0x0e150098 },
+	{ _MMIO(0x9888), 0x10150000 },
+	{ _MMIO(0x9888), 0x04304000 },
+	{ _MMIO(0x9888), 0x04314000 },
+	{ _MMIO(0x9888), 0x04320073 },
+	{ _MMIO(0x9888), 0x10320000 },
+	{ _MMIO(0x9888), 0x04338000 },
+	{ _MMIO(0x9888), 0x06338000 },
+	{ _MMIO(0x9888), 0x08334000 },
+	{ _MMIO(0x9888), 0x0434c000 },
+	{ _MMIO(0x9888), 0x02359890 },
+	{ _MMIO(0x9888), 0x10350000 },
+	{ _MMIO(0x9888), 0x06504000 },
+	{ _MMIO(0x9888), 0x06514000 },
+	{ _MMIO(0x9888), 0x06520073 },
+	{ _MMIO(0x9888), 0x10520000 },
+	{ _MMIO(0x9888), 0x18530020 },
+	{ _MMIO(0x9888), 0x02538000 },
+	{ _MMIO(0x9888), 0x0c534000 },
+	{ _MMIO(0x9888), 0x0a548000 },
+	{ _MMIO(0x9888), 0x04542000 },
+	{ _MMIO(0x9888), 0x0e559000 },
+	{ _MMIO(0x9888), 0x00559800 },
+	{ _MMIO(0x9888), 0x10550000 },
+	{ _MMIO(0x9888), 0x1b8aa000 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x258b0005 },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x2185000a },
+	{ _MMIO(0x9888), 0x1b830150 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0d848000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x07844000 },
+	{ _MMIO(0x9888), 0x1d808000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x17804000 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47801021 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800c64 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x41800c02 },
+};
+
+static int
+get_sampler_2_mux_config(struct drm_i915_private *dev_priv,
+			 const struct i915_oa_reg **regs,
+			 int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_sampler_2;
+	lens[n] = ARRAY_SIZE(mux_config_sampler_2);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_tdl_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000fdff },
+	{ _MMIO(0x2778), 0x00000000 },
+	{ _MMIO(0x277c), 0x0000fe7f },
+	{ _MMIO(0x2780), 0x00000002 },
+	{ _MMIO(0x2784), 0x0000ffbf },
+	{ _MMIO(0x2788), 0x00000000 },
+	{ _MMIO(0x278c), 0x0000ffcf },
+	{ _MMIO(0x2790), 0x00000002 },
+	{ _MMIO(0x2794), 0x0000fff7 },
+	{ _MMIO(0x2798), 0x00000000 },
+	{ _MMIO(0x279c), 0x0000fff9 },
+};
+
+static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_tdl_1[] = {
+	{ _MMIO(0x9888), 0x16154d60 },
+	{ _MMIO(0x9888), 0x16352e60 },
+	{ _MMIO(0x9888), 0x16554d60 },
+	{ _MMIO(0x9888), 0x16950000 },
+	{ _MMIO(0x9888), 0x16b50000 },
+	{ _MMIO(0x9888), 0x16d50000 },
+	{ _MMIO(0x9888), 0x005c8000 },
+	{ _MMIO(0x9888), 0x045cc000 },
+	{ _MMIO(0x9888), 0x065c4000 },
+	{ _MMIO(0x9888), 0x083d8000 },
+	{ _MMIO(0x9888), 0x0a3d8000 },
+	{ _MMIO(0x9888), 0x0458c000 },
+	{ _MMIO(0x9888), 0x025b8000 },
+	{ _MMIO(0x9888), 0x085b4000 },
+	{ _MMIO(0x9888), 0x0a5b4000 },
+	{ _MMIO(0x9888), 0x0c5b8000 },
+	{ _MMIO(0x9888), 0x0c1fa000 },
+	{ _MMIO(0x9888), 0x0e1f00aa },
+	{ _MMIO(0x9888), 0x02384000 },
+	{ _MMIO(0x9888), 0x04388000 },
+	{ _MMIO(0x9888), 0x06388000 },
+	{ _MMIO(0x9888), 0x08384000 },
+	{ _MMIO(0x9888), 0x0a384000 },
+	{ _MMIO(0x9888), 0x0c384000 },
+	{ _MMIO(0x9888), 0x00398000 },
+	{ _MMIO(0x9888), 0x0239a000 },
+	{ _MMIO(0x9888), 0x0439a000 },
+	{ _MMIO(0x9888), 0x06392000 },
+	{ _MMIO(0x9888), 0x043a8000 },
+	{ _MMIO(0x9888), 0x063a8000 },
+	{ _MMIO(0x9888), 0x08138000 },
+	{ _MMIO(0x9888), 0x0a138000 },
+	{ _MMIO(0x9888), 0x06143000 },
+	{ _MMIO(0x9888), 0x0415cfc7 },
+	{ _MMIO(0x9888), 0x10150000 },
+	{ _MMIO(0x9888), 0x02338000 },
+	{ _MMIO(0x9888), 0x0c338000 },
+	{ _MMIO(0x9888), 0x04342000 },
+	{ _MMIO(0x9888), 0x06344000 },
+	{ _MMIO(0x9888), 0x0035c700 },
+	{ _MMIO(0x9888), 0x063500cf },
+	{ _MMIO(0x9888), 0x10350000 },
+	{ _MMIO(0x9888), 0x04538000 },
+	{ _MMIO(0x9888), 0x06538000 },
+	{ _MMIO(0x9888), 0x0454c000 },
+	{ _MMIO(0x9888), 0x0255cfc7 },
+	{ _MMIO(0x9888), 0x10550000 },
+	{ _MMIO(0x9888), 0x06dc8000 },
+	{ _MMIO(0x9888), 0x08dc4000 },
+	{ _MMIO(0x9888), 0x0cdcc000 },
+	{ _MMIO(0x9888), 0x0edcc000 },
+	{ _MMIO(0x9888), 0x1abd00a8 },
+	{ _MMIO(0x9888), 0x0cd8c000 },
+	{ _MMIO(0x9888), 0x0ed84000 },
+	{ _MMIO(0x9888), 0x0edb8000 },
+	{ _MMIO(0x9888), 0x18db0800 },
+	{ _MMIO(0x9888), 0x1adb0254 },
+	{ _MMIO(0x9888), 0x0e9faa00 },
+	{ _MMIO(0x9888), 0x109f02aa },
+	{ _MMIO(0x9888), 0x0eb84000 },
+	{ _MMIO(0x9888), 0x16b84000 },
+	{ _MMIO(0x9888), 0x18b8156a },
+	{ _MMIO(0x9888), 0x06b98000 },
+	{ _MMIO(0x9888), 0x08b9a000 },
+	{ _MMIO(0x9888), 0x0ab9a000 },
+	{ _MMIO(0x9888), 0x0cb9a000 },
+	{ _MMIO(0x9888), 0x0eb9a000 },
+	{ _MMIO(0x9888), 0x18baa000 },
+	{ _MMIO(0x9888), 0x1aba0002 },
+	{ _MMIO(0x9888), 0x16934000 },
+	{ _MMIO(0x9888), 0x1893000a },
+	{ _MMIO(0x9888), 0x0a947000 },
+	{ _MMIO(0x9888), 0x0c95c5c1 },
+	{ _MMIO(0x9888), 0x0e9500c3 },
+	{ _MMIO(0x9888), 0x10950000 },
+	{ _MMIO(0x9888), 0x0eb38000 },
+	{ _MMIO(0x9888), 0x16b30040 },
+	{ _MMIO(0x9888), 0x18b30020 },
+	{ _MMIO(0x9888), 0x06b48000 },
+	{ _MMIO(0x9888), 0x08b41000 },
+	{ _MMIO(0x9888), 0x0ab48000 },
+	{ _MMIO(0x9888), 0x06b5c500 },
+	{ _MMIO(0x9888), 0x08b500c3 },
+	{ _MMIO(0x9888), 0x0eb5c100 },
+	{ _MMIO(0x9888), 0x10b50000 },
+	{ _MMIO(0x9888), 0x16d31500 },
+	{ _MMIO(0x9888), 0x08d4e000 },
+	{ _MMIO(0x9888), 0x08d5c100 },
+	{ _MMIO(0x9888), 0x0ad5c3c5 },
+	{ _MMIO(0x9888), 0x10d50000 },
+	{ _MMIO(0x9888), 0x0d88f800 },
+	{ _MMIO(0x9888), 0x0f88000f },
+	{ _MMIO(0x9888), 0x038a8000 },
+	{ _MMIO(0x9888), 0x058a8000 },
+	{ _MMIO(0x9888), 0x078a8000 },
+	{ _MMIO(0x9888), 0x098a8000 },
+	{ _MMIO(0x9888), 0x0b8a8000 },
+	{ _MMIO(0x9888), 0x0d8a8000 },
+	{ _MMIO(0x9888), 0x258baaa5 },
+	{ _MMIO(0x9888), 0x278b002a },
+	{ _MMIO(0x9888), 0x238b2a80 },
+	{ _MMIO(0x9888), 0x0f8c4000 },
+	{ _MMIO(0x9888), 0x178c2000 },
+	{ _MMIO(0x9888), 0x198c5500 },
+	{ _MMIO(0x9888), 0x1b8c0015 },
+	{ _MMIO(0x9888), 0x078d8000 },
+	{ _MMIO(0x9888), 0x098da000 },
+	{ _MMIO(0x9888), 0x0b8da000 },
+	{ _MMIO(0x9888), 0x0d8da000 },
+	{ _MMIO(0x9888), 0x0f8da000 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800c42 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45800063 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x47800800 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f8014a4 },
+	{ _MMIO(0x9888), 0x41801042 },
+};
+
+static int
+get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
+		     const struct i915_oa_reg **regs,
+		     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_tdl_1;
+	lens[n] = ARRAY_SIZE(mux_config_tdl_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_tdl_2[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000fdff },
+	{ _MMIO(0x2778), 0x00000000 },
+	{ _MMIO(0x277c), 0x0000fe7f },
+	{ _MMIO(0x2780), 0x00000000 },
+	{ _MMIO(0x2784), 0x0000ff9f },
+	{ _MMIO(0x2788), 0x00000000 },
+	{ _MMIO(0x278c), 0x0000ffe7 },
+	{ _MMIO(0x2790), 0x00000002 },
+	{ _MMIO(0x2794), 0x0000fffb },
+	{ _MMIO(0x2798), 0x00000002 },
+	{ _MMIO(0x279c), 0x0000fffd },
+};
+
+static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_tdl_2[] = {
+	{ _MMIO(0x9888), 0x16150000 },
+	{ _MMIO(0x9888), 0x16350000 },
+	{ _MMIO(0x9888), 0x16550000 },
+	{ _MMIO(0x9888), 0x16952e60 },
+	{ _MMIO(0x9888), 0x16b54d60 },
+	{ _MMIO(0x9888), 0x16d52e60 },
+	{ _MMIO(0x9888), 0x065c8000 },
+	{ _MMIO(0x9888), 0x085cc000 },
+	{ _MMIO(0x9888), 0x0a5cc000 },
+	{ _MMIO(0x9888), 0x0c5c4000 },
+	{ _MMIO(0x9888), 0x0e3d8000 },
+	{ _MMIO(0x9888), 0x183da000 },
+	{ _MMIO(0x9888), 0x06588000 },
+	{ _MMIO(0x9888), 0x08588000 },
+	{ _MMIO(0x9888), 0x0a584000 },
+	{ _MMIO(0x9888), 0x0e5b4000 },
+	{ _MMIO(0x9888), 0x185b5800 },
+	{ _MMIO(0x9888), 0x1a5b000a },
+	{ _MMIO(0x9888), 0x0e1faa00 },
+	{ _MMIO(0x9888), 0x101f02aa },
+	{ _MMIO(0x9888), 0x0e384000 },
+	{ _MMIO(0x9888), 0x16384000 },
+	{ _MMIO(0x9888), 0x18382a55 },
+	{ _MMIO(0x9888), 0x06398000 },
+	{ _MMIO(0x9888), 0x0839a000 },
+	{ _MMIO(0x9888), 0x0a39a000 },
+	{ _MMIO(0x9888), 0x0c39a000 },
+	{ _MMIO(0x9888), 0x0e39a000 },
+	{ _MMIO(0x9888), 0x1a3a02a0 },
+	{ _MMIO(0x9888), 0x0e138000 },
+	{ _MMIO(0x9888), 0x16130500 },
+	{ _MMIO(0x9888), 0x06148000 },
+	{ _MMIO(0x9888), 0x08146000 },
+	{ _MMIO(0x9888), 0x0615c100 },
+	{ _MMIO(0x9888), 0x0815c500 },
+	{ _MMIO(0x9888), 0x0a1500c3 },
+	{ _MMIO(0x9888), 0x10150000 },
+	{ _MMIO(0x9888), 0x16335040 },
+	{ _MMIO(0x9888), 0x08349000 },
+	{ _MMIO(0x9888), 0x0a341000 },
+	{ _MMIO(0x9888), 0x083500c1 },
+	{ _MMIO(0x9888), 0x0a35c500 },
+	{ _MMIO(0x9888), 0x0c3500c3 },
+	{ _MMIO(0x9888), 0x10350000 },
+	{ _MMIO(0x9888), 0x1853002a },
+	{ _MMIO(0x9888), 0x0a54e000 },
+	{ _MMIO(0x9888), 0x0c55c500 },
+	{ _MMIO(0x9888), 0x0e55c1c3 },
+	{ _MMIO(0x9888), 0x10550000 },
+	{ _MMIO(0x9888), 0x00dc8000 },
+	{ _MMIO(0x9888), 0x02dcc000 },
+	{ _MMIO(0x9888), 0x04dc4000 },
+	{ _MMIO(0x9888), 0x04bd8000 },
+	{ _MMIO(0x9888), 0x06bd8000 },
+	{ _MMIO(0x9888), 0x02d8c000 },
+	{ _MMIO(0x9888), 0x02db8000 },
+	{ _MMIO(0x9888), 0x04db4000 },
+	{ _MMIO(0x9888), 0x06db4000 },
+	{ _MMIO(0x9888), 0x08db8000 },
+	{ _MMIO(0x9888), 0x0c9fa000 },
+	{ _MMIO(0x9888), 0x0e9f00aa },
+	{ _MMIO(0x9888), 0x02b84000 },
+	{ _MMIO(0x9888), 0x04b84000 },
+	{ _MMIO(0x9888), 0x06b84000 },
+	{ _MMIO(0x9888), 0x08b84000 },
+	{ _MMIO(0x9888), 0x0ab88000 },
+	{ _MMIO(0x9888), 0x0cb88000 },
+	{ _MMIO(0x9888), 0x00b98000 },
+	{ _MMIO(0x9888), 0x02b9a000 },
+	{ _MMIO(0x9888), 0x04b9a000 },
+	{ _MMIO(0x9888), 0x06b92000 },
+	{ _MMIO(0x9888), 0x0aba8000 },
+	{ _MMIO(0x9888), 0x0cba8000 },
+	{ _MMIO(0x9888), 0x04938000 },
+	{ _MMIO(0x9888), 0x06938000 },
+	{ _MMIO(0x9888), 0x0494c000 },
+	{ _MMIO(0x9888), 0x0295cfc7 },
+	{ _MMIO(0x9888), 0x10950000 },
+	{ _MMIO(0x9888), 0x02b38000 },
+	{ _MMIO(0x9888), 0x08b38000 },
+	{ _MMIO(0x9888), 0x04b42000 },
+	{ _MMIO(0x9888), 0x06b41000 },
+	{ _MMIO(0x9888), 0x00b5c700 },
+	{ _MMIO(0x9888), 0x04b500cf },
+	{ _MMIO(0x9888), 0x10b50000 },
+	{ _MMIO(0x9888), 0x0ad38000 },
+	{ _MMIO(0x9888), 0x0cd38000 },
+	{ _MMIO(0x9888), 0x06d46000 },
+	{ _MMIO(0x9888), 0x04d5c700 },
+	{ _MMIO(0x9888), 0x06d500cf },
+	{ _MMIO(0x9888), 0x10d50000 },
+	{ _MMIO(0x9888), 0x03888000 },
+	{ _MMIO(0x9888), 0x05888000 },
+	{ _MMIO(0x9888), 0x07888000 },
+	{ _MMIO(0x9888), 0x09888000 },
+	{ _MMIO(0x9888), 0x0b888000 },
+	{ _MMIO(0x9888), 0x0d880400 },
+	{ _MMIO(0x9888), 0x0f8a8000 },
+	{ _MMIO(0x9888), 0x198a8000 },
+	{ _MMIO(0x9888), 0x1b8aaaa0 },
+	{ _MMIO(0x9888), 0x1d8a0002 },
+	{ _MMIO(0x9888), 0x258b555a },
+	{ _MMIO(0x9888), 0x278b0015 },
+	{ _MMIO(0x9888), 0x238b5500 },
+	{ _MMIO(0x9888), 0x038c4000 },
+	{ _MMIO(0x9888), 0x058c4000 },
+	{ _MMIO(0x9888), 0x078c4000 },
+	{ _MMIO(0x9888), 0x098c4000 },
+	{ _MMIO(0x9888), 0x0b8c4000 },
+	{ _MMIO(0x9888), 0x0d8c4000 },
+	{ _MMIO(0x9888), 0x018d8000 },
+	{ _MMIO(0x9888), 0x038da000 },
+	{ _MMIO(0x9888), 0x058da000 },
+	{ _MMIO(0x9888), 0x078d2000 },
+	{ _MMIO(0x9888), 0x2185aaaa },
+	{ _MMIO(0x9888), 0x2385002a },
+	{ _MMIO(0x9888), 0x1f85aa00 },
+	{ _MMIO(0x9888), 0x0f834000 },
+	{ _MMIO(0x9888), 0x19835400 },
+	{ _MMIO(0x9888), 0x1b830155 },
+	{ _MMIO(0x9888), 0x03834000 },
+	{ _MMIO(0x9888), 0x05834000 },
+	{ _MMIO(0x9888), 0x07834000 },
+	{ _MMIO(0x9888), 0x09834000 },
+	{ _MMIO(0x9888), 0x0b834000 },
+	{ _MMIO(0x9888), 0x0d834000 },
+	{ _MMIO(0x9888), 0x0784c000 },
+	{ _MMIO(0x9888), 0x0984c000 },
+	{ _MMIO(0x9888), 0x0b84c000 },
+	{ _MMIO(0x9888), 0x0d84c000 },
+	{ _MMIO(0x9888), 0x0f84c000 },
+	{ _MMIO(0x9888), 0x01848000 },
+	{ _MMIO(0x9888), 0x0384c000 },
+	{ _MMIO(0x9888), 0x0584c000 },
+	{ _MMIO(0x9888), 0x1780c000 },
+	{ _MMIO(0x9888), 0x1980c000 },
+	{ _MMIO(0x9888), 0x1b80c000 },
+	{ _MMIO(0x9888), 0x1d80c000 },
+	{ _MMIO(0x9888), 0x1f80c000 },
+	{ _MMIO(0x9888), 0x11808000 },
+	{ _MMIO(0x9888), 0x1380c000 },
+	{ _MMIO(0x9888), 0x1580c000 },
+	{ _MMIO(0x9888), 0x4f800000 },
+	{ _MMIO(0x9888), 0x43800882 },
+	{ _MMIO(0x9888), 0x51800000 },
+	{ _MMIO(0x9888), 0x45801082 },
+	{ _MMIO(0x9888), 0x53800000 },
+	{ _MMIO(0x9888), 0x478014a5 },
+	{ _MMIO(0x9888), 0x21800000 },
+	{ _MMIO(0x9888), 0x31800000 },
+	{ _MMIO(0x9888), 0x4d800000 },
+	{ _MMIO(0x9888), 0x3f800002 },
+	{ _MMIO(0x9888), 0x41800c62 },
+};
+
+static int
+get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
+		     const struct i915_oa_reg **regs,
+		     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_tdl_2;
+	lens[n] = ARRAY_SIZE(mux_config_tdl_2);
+	n++;
+
+	return n;
+}
+
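+/* TEST_OA is a deliberately small configuration meant for exercising and
+ * validating the OA unit and its report stream rather than for profiling
+ * real workloads.
+ */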
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2770), 0x00000004 },
+	{ _MMIO(0x2774), 0x00000000 },
+	{ _MMIO(0x2778), 0x00000003 },
+	{ _MMIO(0x277c), 0x00000000 },
+	{ _MMIO(0x2780), 0x00000007 },
+	{ _MMIO(0x2784), 0x00000000 },
+	{ _MMIO(0x2788), 0x00100002 },
+	{ _MMIO(0x278c), 0x0000fff7 },
+	{ _MMIO(0x2790), 0x00100002 },
+	{ _MMIO(0x2794), 0x0000ffcf },
+	{ _MMIO(0x2798), 0x00100082 },
+	{ _MMIO(0x279c), 0x0000ffef },
+	{ _MMIO(0x27a0), 0x001000c2 },
+	{ _MMIO(0x27a4), 0x0000ffe7 },
+	{ _MMIO(0x27a8), 0x00100001 },
+	{ _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+	{ _MMIO(0x9888), 0x59800000 },
+	{ _MMIO(0x9888), 0x59800001 },
+	{ _MMIO(0x9888), 0x338b0000 },
+	{ _MMIO(0x9888), 0x258b0066 },
+	{ _MMIO(0x9888), 0x058b0000 },
+	{ _MMIO(0x9888), 0x038b0000 },
+	{ _MMIO(0x9888), 0x03844000 },
+	{ _MMIO(0x9888), 0x47800080 },
+	{ _MMIO(0x9888), 0x57800000 },
+	{ _MMIO(0x1823a4), 0x00000000 },
+	{ _MMIO(0x9888), 0x59800000 },
+};
+
+static int
+get_test_oa_mux_config(struct drm_i915_private *dev_priv,
+		       const struct i915_oa_reg **regs,
+		       int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_test_oa;
+	lens[n] = ARRAY_SIZE(mux_config_test_oa);
+	n++;
+
+	return n;
+}
+
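+/* Look up the register lists for the metric set currently requested in
+ * dev_priv->perf.oa.metrics_set and point the OA state at them.  Returns
+ * 0 on success, -EINVAL if no suitable MUX config is available for this
+ * device, or -ENODEV for an unrecognized metric set ID.
+ */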
+int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv)
+{
+	dev_priv->perf.oa.n_mux_configs = 0;
+	dev_priv->perf.oa.b_counter_regs = NULL;
+	dev_priv->perf.oa.b_counter_regs_len = 0;
+	dev_priv->perf.oa.flex_regs = NULL;
+	dev_priv->perf.oa.flex_regs_len = 0;
+
+	switch (dev_priv->perf.oa.metrics_set) {
+	case METRIC_SET_ID_RENDER_BASIC:
+		dev_priv->perf.oa.n_mux_configs =
+			get_render_basic_mux_config(dev_priv,
+						    dev_priv->perf.oa.mux_regs,
+						    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_render_basic;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_render_basic);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_render_basic;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_render_basic);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_BASIC:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_basic_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_basic;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_basic);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_basic;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_basic);
+
+		return 0;
+	case METRIC_SET_ID_RENDER_PIPE_PROFILE:
+		dev_priv->perf.oa.n_mux_configs =
+			get_render_pipe_profile_mux_config(dev_priv,
+							   dev_priv->perf.oa.mux_regs,
+							   dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_render_pipe_profile;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_render_pipe_profile);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_render_pipe_profile;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_render_pipe_profile);
+
+		return 0;
+	case METRIC_SET_ID_HDC_AND_SF:
+		dev_priv->perf.oa.n_mux_configs =
+			get_hdc_and_sf_mux_config(dev_priv,
+						  dev_priv->perf.oa.mux_regs,
+						  dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_hdc_and_sf;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_hdc_and_sf);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_hdc_and_sf;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_hdc_and_sf);
+
+		return 0;
+	case METRIC_SET_ID_L3_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_1_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_1);
+
+		return 0;
+	case METRIC_SET_ID_L3_2:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_2_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_2;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_2);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_2;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_2);
+
+		return 0;
+	case METRIC_SET_ID_L3_3:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_3_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_3;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_3);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_3;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_3);
+
+		return 0;
+	case METRIC_SET_ID_L3_4:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_4_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_4\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_4;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_4);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_4;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_4);
+
+		return 0;
+	case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
+		dev_priv->perf.oa.n_mux_configs =
+			get_rasterizer_and_pixel_backend_mux_config(dev_priv,
+								    dev_priv->perf.oa.mux_regs,
+								    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_rasterizer_and_pixel_backend;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_rasterizer_and_pixel_backend;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
+
+		return 0;
+	case METRIC_SET_ID_SAMPLER_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_sampler_1_mux_config(dev_priv,
+						 dev_priv->perf.oa.mux_regs,
+						 dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_1\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_sampler_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_sampler_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_sampler_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_sampler_1);
+
+		return 0;
+	case METRIC_SET_ID_SAMPLER_2:
+		dev_priv->perf.oa.n_mux_configs =
+			get_sampler_2_mux_config(dev_priv,
+						 dev_priv->perf.oa.mux_regs,
+						 dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_2\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_sampler_2;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_sampler_2);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_sampler_2;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_sampler_2);
+
+		return 0;
+	case METRIC_SET_ID_TDL_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_tdl_1_mux_config(dev_priv,
+					     dev_priv->perf.oa.mux_regs,
+					     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_tdl_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_tdl_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_tdl_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_tdl_1);
+
+		return 0;
+	case METRIC_SET_ID_TDL_2:
+		dev_priv->perf.oa.n_mux_configs =
+			get_tdl_2_mux_config(dev_priv,
+					     dev_priv->perf.oa.mux_regs,
+					     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_tdl_2;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_tdl_2);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_tdl_2;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_tdl_2);
+
+		return 0;
+	case METRIC_SET_ID_TEST_OA:
+		dev_priv->perf.oa.n_mux_configs =
+			get_test_oa_mux_config(dev_priv,
+					       dev_priv->perf.oa.mux_regs,
+					       dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this,
+			 * so the set would not have been advertised to
+			 * userspace and should never have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_test_oa;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_test_oa);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_test_oa;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_test_oa);
+
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
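+/*
+ * Each metric set below is exposed as a sysfs directory named by the
+ * set's GUID, holding a single read-only "id" file. Usage sketch,
+ * assuming the standard i915 perf interface: a profiler reads
+ * /sys/class/drm/card0/metrics/<guid>/id and passes the integer as the
+ * DRM_I915_PERF_PROP_OA_METRICS_SET property when opening a perf
+ * stream, which is what drives the select_metric_set switch above.
+ */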
+static ssize_t
+show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
+}
+
+static struct device_attribute dev_attr_render_basic_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_render_basic_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_render_basic[] = {
+	&dev_attr_render_basic_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_render_basic = {
+	.name = "9d8a3af5-c02c-4a4a-b947-f1672469e0fb",
+	.attrs = attrs_render_basic,
+};
+
+static ssize_t
+show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
+}
+
+static struct device_attribute dev_attr_compute_basic_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_basic_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_basic[] = {
+	&dev_attr_compute_basic_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_basic = {
+	.name = "f522a89c-ecd1-4522-8331-3383c54af5f5",
+	.attrs = attrs_compute_basic,
+};
+
+static ssize_t
+show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
+}
+
+static struct device_attribute dev_attr_render_pipe_profile_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_render_pipe_profile_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_render_pipe_profile[] = {
+	&dev_attr_render_pipe_profile_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_render_pipe_profile = {
+	.name = "a9ccc03d-a943-4e6b-9cd6-13e063075927",
+	.attrs = attrs_render_pipe_profile,
+};
+
+static ssize_t
+show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
+}
+
+static struct device_attribute dev_attr_hdc_and_sf_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_hdc_and_sf_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_hdc_and_sf[] = {
+	&dev_attr_hdc_and_sf_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_hdc_and_sf = {
+	.name = "2cf0c064-68df-4fac-9b3f-57f51ca8a069",
+	.attrs = attrs_hdc_and_sf,
+};
+
+static ssize_t
+show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
+}
+
+static struct device_attribute dev_attr_l3_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_1[] = {
+	&dev_attr_l3_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_1 = {
+	.name = "78a87ff9-543a-49ce-95ea-26d86071ea93",
+	.attrs = attrs_l3_1,
+};
+
+static ssize_t
+show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
+}
+
+static struct device_attribute dev_attr_l3_2_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_2_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_2[] = {
+	&dev_attr_l3_2_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_2 = {
+	.name = "9f2cece5-7bfe-4320-ad66-8c7cc526bec5",
+	.attrs = attrs_l3_2,
+};
+
+static ssize_t
+show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
+}
+
+static struct device_attribute dev_attr_l3_3_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_3_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_3[] = {
+	&dev_attr_l3_3_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_3 = {
+	.name = "d890ef38-d309-47e4-b8b5-aa779bb19ab0",
+	.attrs = attrs_l3_3,
+};
+
+static ssize_t
+show_l3_4_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_4);
+}
+
+static struct device_attribute dev_attr_l3_4_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_4_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_4[] = {
+	&dev_attr_l3_4_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_4 = {
+	.name = "5fdff4a6-9dc8-45e1-bfda-ef54869fbdd4",
+	.attrs = attrs_l3_4,
+};
+
+static ssize_t
+show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
+}
+
+static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_rasterizer_and_pixel_backend_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
+	&dev_attr_rasterizer_and_pixel_backend_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_rasterizer_and_pixel_backend = {
+	.name = "2c0e45e1-7e2c-4a14-ae00-0b7ec868b8aa",
+	.attrs = attrs_rasterizer_and_pixel_backend,
+};
+
+static ssize_t
+show_sampler_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_1);
+}
+
+static struct device_attribute dev_attr_sampler_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_sampler_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_sampler_1[] = {
+	&dev_attr_sampler_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_sampler_1 = {
+	.name = "71148d78-baf5-474f-878a-e23158d0265d",
+	.attrs = attrs_sampler_1,
+};
+
+static ssize_t
+show_sampler_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_2);
+}
+
+static struct device_attribute dev_attr_sampler_2_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_sampler_2_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_sampler_2[] = {
+	&dev_attr_sampler_2_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_sampler_2 = {
+	.name = "b996a2b7-c59c-492d-877a-8cd54fd6df84",
+	.attrs = attrs_sampler_2,
+};
+
+static ssize_t
+show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
+}
+
+static struct device_attribute dev_attr_tdl_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_tdl_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_tdl_1[] = {
+	&dev_attr_tdl_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_tdl_1 = {
+	.name = "eb2fecba-b431-42e7-8261-fe9429a6e67a",
+	.attrs = attrs_tdl_1,
+};
+
+static ssize_t
+show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
+}
+
+static struct device_attribute dev_attr_tdl_2_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_tdl_2_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_tdl_2[] = {
+	&dev_attr_tdl_2_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_tdl_2 = {
+	.name = "60749470-a648-4a4b-9f10-dbfe1e36e44d",
+	.attrs = attrs_tdl_2,
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+}
+
+static struct device_attribute dev_attr_test_oa_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_test_oa_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_test_oa[] = {
+	&dev_attr_test_oa_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_test_oa = {
+	.name = "4a534b07-cba3-414d-8d60-874830e883aa",
+	.attrs = attrs_test_oa,
+};
+
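+/*
+ * A metric set's sysfs group is only created when get_*_mux_config()
+ * reports at least one suitable MUX config for this device; on a create
+ * failure the error path below unwinds, under the same condition, every
+ * group registered so far.
+ */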
+int
+i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv)
+{
+	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
+	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+	int ret = 0;
+
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+		if (ret)
+			goto error_render_basic;
+	}
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+		if (ret)
+			goto error_compute_basic;
+	}
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+		if (ret)
+			goto error_render_pipe_profile;
+	}
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+		if (ret)
+			goto error_hdc_and_sf;
+	}
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+		if (ret)
+			goto error_l3_1;
+	}
+	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
+		if (ret)
+			goto error_l3_2;
+	}
+	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
+		if (ret)
+			goto error_l3_3;
+	}
+	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_4);
+		if (ret)
+			goto error_l3_4;
+	}
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+		if (ret)
+			goto error_rasterizer_and_pixel_backend;
+	}
+	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
+		if (ret)
+			goto error_sampler_1;
+	}
+	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
+		if (ret)
+			goto error_sampler_2;
+	}
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+		if (ret)
+			goto error_tdl_1;
+	}
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+		if (ret)
+			goto error_tdl_2;
+	}
+	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+		if (ret)
+			goto error_test_oa;
+	}
+
+	return 0;
+
+error_test_oa:
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+error_tdl_2:
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+error_tdl_1:
+	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
+error_sampler_2:
+	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
+error_sampler_1:
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+error_rasterizer_and_pixel_backend:
+	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
+error_l3_4:
+	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
+error_l3_3:
+	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
+error_l3_2:
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+error_l3_1:
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+error_hdc_and_sf:
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+error_render_pipe_profile:
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+error_compute_basic:
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+error_render_basic:
+	return ret;
+}
+
+void
+i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv)
+{
+	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
+	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+	if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
+	if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
+	if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+	if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
+	if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+}

+ 40 - 0
drivers/gpu/drm/i915/i915_oa_chv.h

@@ -0,0 +1,40 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_CHV_H__
+#define __I915_OA_CHV_H__
+
+extern int i915_oa_n_builtin_metric_sets_chv;
+
+extern int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv);
+
+extern int i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv);
+
+extern void i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv);
+
+#endif

+ 2602 - 0
drivers/gpu/drm/i915/i915_oa_glk.c

@@ -0,0 +1,2602 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_glk.h"
+
+enum metric_set_id {
+	METRIC_SET_ID_RENDER_BASIC = 1,
+	METRIC_SET_ID_COMPUTE_BASIC,
+	METRIC_SET_ID_RENDER_PIPE_PROFILE,
+	METRIC_SET_ID_MEMORY_READS,
+	METRIC_SET_ID_MEMORY_WRITES,
+	METRIC_SET_ID_COMPUTE_EXTENDED,
+	METRIC_SET_ID_COMPUTE_L3_CACHE,
+	METRIC_SET_ID_HDC_AND_SF,
+	METRIC_SET_ID_L3_1,
+	METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
+	METRIC_SET_ID_SAMPLER,
+	METRIC_SET_ID_TDL_1,
+	METRIC_SET_ID_TDL_2,
+	METRIC_SET_ID_COMPUTE_EXTRA,
+	METRIC_SET_ID_TEST_OA,
+};
+
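+/* Matches the 15 entries of enum metric_set_id above; IDs start at 1. */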
+int i915_oa_n_builtin_metric_sets_glk = 15;
+
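+/*
+ * Each metric set is described by up to three register lists: boolean
+ * counter ("b_counter") config in the OA unit's 0x27xx trigger/report
+ * range, flex EU counter config (the 0xe4xx-0xe7xx EU_PERF_CNTL
+ * registers, going by these tables), and NOA MUX programming written
+ * through register 0x9888.
+ */
+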
+static const struct i915_oa_reg b_counter_config_render_basic[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_render_basic[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_render_basic[] = {
+	{ _MMIO(0x9888), 0x166c00f0 },
+	{ _MMIO(0x9888), 0x12120280 },
+	{ _MMIO(0x9888), 0x12320280 },
+	{ _MMIO(0x9888), 0x11930317 },
+	{ _MMIO(0x9888), 0x159303df },
+	{ _MMIO(0x9888), 0x3f900c00 },
+	{ _MMIO(0x9888), 0x419000a0 },
+	{ _MMIO(0x9888), 0x002d1000 },
+	{ _MMIO(0x9888), 0x062d4000 },
+	{ _MMIO(0x9888), 0x082d5000 },
+	{ _MMIO(0x9888), 0x0a2d1000 },
+	{ _MMIO(0x9888), 0x0c2e0800 },
+	{ _MMIO(0x9888), 0x0e2e5900 },
+	{ _MMIO(0x9888), 0x0a4c8000 },
+	{ _MMIO(0x9888), 0x0c4c8000 },
+	{ _MMIO(0x9888), 0x0e4c4000 },
+	{ _MMIO(0x9888), 0x064e8000 },
+	{ _MMIO(0x9888), 0x084e8000 },
+	{ _MMIO(0x9888), 0x0a4e2000 },
+	{ _MMIO(0x9888), 0x1c4f0010 },
+	{ _MMIO(0x9888), 0x0a6c0053 },
+	{ _MMIO(0x9888), 0x106c0000 },
+	{ _MMIO(0x9888), 0x1c6c0000 },
+	{ _MMIO(0x9888), 0x1a0fcc00 },
+	{ _MMIO(0x9888), 0x1c0f0002 },
+	{ _MMIO(0x9888), 0x1c2c0040 },
+	{ _MMIO(0x9888), 0x00101000 },
+	{ _MMIO(0x9888), 0x04101000 },
+	{ _MMIO(0x9888), 0x00114000 },
+	{ _MMIO(0x9888), 0x08114000 },
+	{ _MMIO(0x9888), 0x00120020 },
+	{ _MMIO(0x9888), 0x08120021 },
+	{ _MMIO(0x9888), 0x00141000 },
+	{ _MMIO(0x9888), 0x08141000 },
+	{ _MMIO(0x9888), 0x02308000 },
+	{ _MMIO(0x9888), 0x04302000 },
+	{ _MMIO(0x9888), 0x06318000 },
+	{ _MMIO(0x9888), 0x08318000 },
+	{ _MMIO(0x9888), 0x06320800 },
+	{ _MMIO(0x9888), 0x08320840 },
+	{ _MMIO(0x9888), 0x00320000 },
+	{ _MMIO(0x9888), 0x06344000 },
+	{ _MMIO(0x9888), 0x08344000 },
+	{ _MMIO(0x9888), 0x0d931831 },
+	{ _MMIO(0x9888), 0x0f939f3f },
+	{ _MMIO(0x9888), 0x01939e80 },
+	{ _MMIO(0x9888), 0x039303bc },
+	{ _MMIO(0x9888), 0x0593000e },
+	{ _MMIO(0x9888), 0x1993002a },
+	{ _MMIO(0x9888), 0x07930000 },
+	{ _MMIO(0x9888), 0x09930000 },
+	{ _MMIO(0x9888), 0x1d900177 },
+	{ _MMIO(0x9888), 0x1f900187 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x13904000 },
+	{ _MMIO(0x9888), 0x21904000 },
+	{ _MMIO(0x9888), 0x23904000 },
+	{ _MMIO(0x9888), 0x25904000 },
+	{ _MMIO(0x9888), 0x27904000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x53901110 },
+	{ _MMIO(0x9888), 0x43900423 },
+	{ _MMIO(0x9888), 0x55900111 },
+	{ _MMIO(0x9888), 0x47900c02 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900020 },
+	{ _MMIO(0x9888), 0x59901111 },
+	{ _MMIO(0x9888), 0x4b900421 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x45900821 },
+};
+
+static int
+get_render_basic_mux_config(struct drm_i915_private *dev_priv,
+			    const struct i915_oa_reg **regs,
+			    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_render_basic;
+	lens[n] = ARRAY_SIZE(mux_config_render_basic);
+	n++;
+
+	return n;
+}
+
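+/*
+ * All get_*_mux_config() helpers in this file follow the same contract:
+ * fill regs[]/lens[] with the candidate MUX register lists for this
+ * device and return how many entries were written. GLK carries a single
+ * variant per set, so each helper returns 1; returning 0 would tell the
+ * callers "no suitable config".
+ */
+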
+static const struct i915_oa_reg b_counter_config_compute_basic[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_compute_basic[] = {
+	{ _MMIO(0x9888), 0x104f00e0 },
+	{ _MMIO(0x9888), 0x124f1c00 },
+	{ _MMIO(0x9888), 0x39900340 },
+	{ _MMIO(0x9888), 0x3f900c00 },
+	{ _MMIO(0x9888), 0x41900000 },
+	{ _MMIO(0x9888), 0x002d5000 },
+	{ _MMIO(0x9888), 0x062d4000 },
+	{ _MMIO(0x9888), 0x082d4000 },
+	{ _MMIO(0x9888), 0x0a2d1000 },
+	{ _MMIO(0x9888), 0x0c2d5000 },
+	{ _MMIO(0x9888), 0x0e2d4000 },
+	{ _MMIO(0x9888), 0x0c2e1400 },
+	{ _MMIO(0x9888), 0x0e2e5100 },
+	{ _MMIO(0x9888), 0x102e0114 },
+	{ _MMIO(0x9888), 0x044cc000 },
+	{ _MMIO(0x9888), 0x0a4c8000 },
+	{ _MMIO(0x9888), 0x0c4c8000 },
+	{ _MMIO(0x9888), 0x0e4c4000 },
+	{ _MMIO(0x9888), 0x104c8000 },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x004ea000 },
+	{ _MMIO(0x9888), 0x064e8000 },
+	{ _MMIO(0x9888), 0x084e8000 },
+	{ _MMIO(0x9888), 0x0a4e2000 },
+	{ _MMIO(0x9888), 0x0c4ea000 },
+	{ _MMIO(0x9888), 0x0e4e8000 },
+	{ _MMIO(0x9888), 0x004f6b42 },
+	{ _MMIO(0x9888), 0x064f6200 },
+	{ _MMIO(0x9888), 0x084f4100 },
+	{ _MMIO(0x9888), 0x0a4f0061 },
+	{ _MMIO(0x9888), 0x0c4f6c4c },
+	{ _MMIO(0x9888), 0x0e4f4b00 },
+	{ _MMIO(0x9888), 0x1a4f0000 },
+	{ _MMIO(0x9888), 0x1c4f0000 },
+	{ _MMIO(0x9888), 0x180f5000 },
+	{ _MMIO(0x9888), 0x1a0f8800 },
+	{ _MMIO(0x9888), 0x1c0f08a2 },
+	{ _MMIO(0x9888), 0x182c4000 },
+	{ _MMIO(0x9888), 0x1c2c1451 },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x1a2c0010 },
+	{ _MMIO(0x9888), 0x01938000 },
+	{ _MMIO(0x9888), 0x0f938000 },
+	{ _MMIO(0x9888), 0x19938a28 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x19900177 },
+	{ _MMIO(0x9888), 0x1b900178 },
+	{ _MMIO(0x9888), 0x1d900125 },
+	{ _MMIO(0x9888), 0x1f900123 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x13904000 },
+	{ _MMIO(0x9888), 0x21904000 },
+	{ _MMIO(0x9888), 0x25904000 },
+	{ _MMIO(0x9888), 0x27904000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x53901000 },
+	{ _MMIO(0x9888), 0x43900000 },
+	{ _MMIO(0x9888), 0x55900111 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900000 },
+	{ _MMIO(0x9888), 0x45900000 },
+};
+
+static int
+get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_basic;
+	lens[n] = ARRAY_SIZE(mux_config_compute_basic);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007ffea },
+	{ _MMIO(0x2774), 0x00007ffc },
+	{ _MMIO(0x2778), 0x0007affa },
+	{ _MMIO(0x277c), 0x0000f5fd },
+	{ _MMIO(0x2780), 0x00079ffa },
+	{ _MMIO(0x2784), 0x0000f3fb },
+	{ _MMIO(0x2788), 0x0007bf7a },
+	{ _MMIO(0x278c), 0x0000f7e7 },
+	{ _MMIO(0x2790), 0x0007fefa },
+	{ _MMIO(0x2794), 0x0000f7cf },
+	{ _MMIO(0x2798), 0x00077ffa },
+	{ _MMIO(0x279c), 0x0000efdf },
+	{ _MMIO(0x27a0), 0x0006fffa },
+	{ _MMIO(0x27a4), 0x0000cfbf },
+	{ _MMIO(0x27a8), 0x0003fffa },
+	{ _MMIO(0x27ac), 0x00005f7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
+	{ _MMIO(0x9888), 0x0c2e001f },
+	{ _MMIO(0x9888), 0x0a2f0000 },
+	{ _MMIO(0x9888), 0x10186800 },
+	{ _MMIO(0x9888), 0x11810019 },
+	{ _MMIO(0x9888), 0x15810013 },
+	{ _MMIO(0x9888), 0x13820020 },
+	{ _MMIO(0x9888), 0x11830020 },
+	{ _MMIO(0x9888), 0x17840000 },
+	{ _MMIO(0x9888), 0x11860007 },
+	{ _MMIO(0x9888), 0x21860000 },
+	{ _MMIO(0x9888), 0x178703e0 },
+	{ _MMIO(0x9888), 0x0c2d8000 },
+	{ _MMIO(0x9888), 0x042d4000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x022e5400 },
+	{ _MMIO(0x9888), 0x002e0000 },
+	{ _MMIO(0x9888), 0x0e2e0080 },
+	{ _MMIO(0x9888), 0x082f0040 },
+	{ _MMIO(0x9888), 0x002f0000 },
+	{ _MMIO(0x9888), 0x06143000 },
+	{ _MMIO(0x9888), 0x06174000 },
+	{ _MMIO(0x9888), 0x06180012 },
+	{ _MMIO(0x9888), 0x00180000 },
+	{ _MMIO(0x9888), 0x0d804000 },
+	{ _MMIO(0x9888), 0x0f804000 },
+	{ _MMIO(0x9888), 0x05804000 },
+	{ _MMIO(0x9888), 0x09810200 },
+	{ _MMIO(0x9888), 0x0b810030 },
+	{ _MMIO(0x9888), 0x03810003 },
+	{ _MMIO(0x9888), 0x21819140 },
+	{ _MMIO(0x9888), 0x23819050 },
+	{ _MMIO(0x9888), 0x25810018 },
+	{ _MMIO(0x9888), 0x0b820980 },
+	{ _MMIO(0x9888), 0x03820d80 },
+	{ _MMIO(0x9888), 0x11820000 },
+	{ _MMIO(0x9888), 0x0182c000 },
+	{ _MMIO(0x9888), 0x07828000 },
+	{ _MMIO(0x9888), 0x09824000 },
+	{ _MMIO(0x9888), 0x0f828000 },
+	{ _MMIO(0x9888), 0x0d830004 },
+	{ _MMIO(0x9888), 0x0583000c },
+	{ _MMIO(0x9888), 0x0f831000 },
+	{ _MMIO(0x9888), 0x01848072 },
+	{ _MMIO(0x9888), 0x11840000 },
+	{ _MMIO(0x9888), 0x07848000 },
+	{ _MMIO(0x9888), 0x09844000 },
+	{ _MMIO(0x9888), 0x0f848000 },
+	{ _MMIO(0x9888), 0x07860000 },
+	{ _MMIO(0x9888), 0x09860092 },
+	{ _MMIO(0x9888), 0x0f860400 },
+	{ _MMIO(0x9888), 0x01869100 },
+	{ _MMIO(0x9888), 0x0f870065 },
+	{ _MMIO(0x9888), 0x01870000 },
+	{ _MMIO(0x9888), 0x19930800 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x1b952000 },
+	{ _MMIO(0x9888), 0x1d955055 },
+	{ _MMIO(0x9888), 0x1f951455 },
+	{ _MMIO(0x9888), 0x0992a000 },
+	{ _MMIO(0x9888), 0x0f928000 },
+	{ _MMIO(0x9888), 0x1192a800 },
+	{ _MMIO(0x9888), 0x1392028a },
+	{ _MMIO(0x9888), 0x0b92a000 },
+	{ _MMIO(0x9888), 0x0d922000 },
+	{ _MMIO(0x9888), 0x13908000 },
+	{ _MMIO(0x9888), 0x21908000 },
+	{ _MMIO(0x9888), 0x23908000 },
+	{ _MMIO(0x9888), 0x25908000 },
+	{ _MMIO(0x9888), 0x27908000 },
+	{ _MMIO(0x9888), 0x29908000 },
+	{ _MMIO(0x9888), 0x2b908000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f908000 },
+	{ _MMIO(0x9888), 0x31908000 },
+	{ _MMIO(0x9888), 0x15908000 },
+	{ _MMIO(0x9888), 0x17908000 },
+	{ _MMIO(0x9888), 0x19908000 },
+	{ _MMIO(0x9888), 0x1b908000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900c01 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900863 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900061 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900000 },
+	{ _MMIO(0x9888), 0x45900c22 },
+};
+
+static int
+get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
+				   const struct i915_oa_reg **regs,
+				   int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_render_pipe_profile;
+	lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_reads[] = {
+	{ _MMIO(0x272c), 0xffffffff },
+	{ _MMIO(0x2728), 0xffffffff },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x271c), 0xffffffff },
+	{ _MMIO(0x2718), 0xffffffff },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x274c), 0x86543210 },
+	{ _MMIO(0x2748), 0x86543210 },
+	{ _MMIO(0x2744), 0x00006667 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x275c), 0x86543210 },
+	{ _MMIO(0x2758), 0x86543210 },
+	{ _MMIO(0x2754), 0x00006465 },
+	{ _MMIO(0x2750), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007f81a },
+	{ _MMIO(0x2774), 0x0000fe00 },
+	{ _MMIO(0x2778), 0x0007f82a },
+	{ _MMIO(0x277c), 0x0000fe00 },
+	{ _MMIO(0x2780), 0x0007f872 },
+	{ _MMIO(0x2784), 0x0000fe00 },
+	{ _MMIO(0x2788), 0x0007f8ba },
+	{ _MMIO(0x278c), 0x0000fe00 },
+	{ _MMIO(0x2790), 0x0007f87a },
+	{ _MMIO(0x2794), 0x0000fe00 },
+	{ _MMIO(0x2798), 0x0007f8ea },
+	{ _MMIO(0x279c), 0x0000fe00 },
+	{ _MMIO(0x27a0), 0x0007f8e2 },
+	{ _MMIO(0x27a4), 0x0000fe00 },
+	{ _MMIO(0x27a8), 0x0007f8f2 },
+	{ _MMIO(0x27ac), 0x0000fe00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_memory_reads[] = {
+	{ _MMIO(0x9888), 0x19800343 },
+	{ _MMIO(0x9888), 0x39900340 },
+	{ _MMIO(0x9888), 0x3f901000 },
+	{ _MMIO(0x9888), 0x41900003 },
+	{ _MMIO(0x9888), 0x03803180 },
+	{ _MMIO(0x9888), 0x058035e2 },
+	{ _MMIO(0x9888), 0x0780006a },
+	{ _MMIO(0x9888), 0x11800000 },
+	{ _MMIO(0x9888), 0x2181a000 },
+	{ _MMIO(0x9888), 0x2381000a },
+	{ _MMIO(0x9888), 0x1d950550 },
+	{ _MMIO(0x9888), 0x0b928000 },
+	{ _MMIO(0x9888), 0x0d92a000 },
+	{ _MMIO(0x9888), 0x0f922000 },
+	{ _MMIO(0x9888), 0x13900170 },
+	{ _MMIO(0x9888), 0x21900171 },
+	{ _MMIO(0x9888), 0x23900172 },
+	{ _MMIO(0x9888), 0x25900173 },
+	{ _MMIO(0x9888), 0x27900174 },
+	{ _MMIO(0x9888), 0x29900175 },
+	{ _MMIO(0x9888), 0x2b900176 },
+	{ _MMIO(0x9888), 0x2d900177 },
+	{ _MMIO(0x9888), 0x2f90017f },
+	{ _MMIO(0x9888), 0x31900125 },
+	{ _MMIO(0x9888), 0x15900123 },
+	{ _MMIO(0x9888), 0x17900121 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x19908000 },
+	{ _MMIO(0x9888), 0x1b908000 },
+	{ _MMIO(0x9888), 0x1d908000 },
+	{ _MMIO(0x9888), 0x1f908000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43901084 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47901080 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49901084 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b901084 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900004 },
+	{ _MMIO(0x9888), 0x45900000 },
+};
+
+static int
+get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
+			    const struct i915_oa_reg **regs,
+			    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_memory_reads;
+	lens[n] = ARRAY_SIZE(mux_config_memory_reads);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_writes[] = {
+	{ _MMIO(0x272c), 0xffffffff },
+	{ _MMIO(0x2728), 0xffffffff },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x271c), 0xffffffff },
+	{ _MMIO(0x2718), 0xffffffff },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x274c), 0x86543210 },
+	{ _MMIO(0x2748), 0x86543210 },
+	{ _MMIO(0x2744), 0x00006667 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x275c), 0x86543210 },
+	{ _MMIO(0x2758), 0x86543210 },
+	{ _MMIO(0x2754), 0x00006465 },
+	{ _MMIO(0x2750), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007f81a },
+	{ _MMIO(0x2774), 0x0000fe00 },
+	{ _MMIO(0x2778), 0x0007f82a },
+	{ _MMIO(0x277c), 0x0000fe00 },
+	{ _MMIO(0x2780), 0x0007f822 },
+	{ _MMIO(0x2784), 0x0000fe00 },
+	{ _MMIO(0x2788), 0x0007f8ba },
+	{ _MMIO(0x278c), 0x0000fe00 },
+	{ _MMIO(0x2790), 0x0007f87a },
+	{ _MMIO(0x2794), 0x0000fe00 },
+	{ _MMIO(0x2798), 0x0007f8ea },
+	{ _MMIO(0x279c), 0x0000fe00 },
+	{ _MMIO(0x27a0), 0x0007f8e2 },
+	{ _MMIO(0x27a4), 0x0000fe00 },
+	{ _MMIO(0x27a8), 0x0007f8f2 },
+	{ _MMIO(0x27ac), 0x0000fe00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00015014 },
+	{ _MMIO(0xe658), 0x00025024 },
+	{ _MMIO(0xe758), 0x00035034 },
+	{ _MMIO(0xe45c), 0x00045044 },
+	{ _MMIO(0xe55c), 0x00055054 },
+	{ _MMIO(0xe65c), 0x00065064 },
+};
+
+static const struct i915_oa_reg mux_config_memory_writes[] = {
+	{ _MMIO(0x9888), 0x19800343 },
+	{ _MMIO(0x9888), 0x39900340 },
+	{ _MMIO(0x9888), 0x3f900000 },
+	{ _MMIO(0x9888), 0x41900080 },
+	{ _MMIO(0x9888), 0x03803180 },
+	{ _MMIO(0x9888), 0x058035e2 },
+	{ _MMIO(0x9888), 0x0780006a },
+	{ _MMIO(0x9888), 0x11800000 },
+	{ _MMIO(0x9888), 0x2181a000 },
+	{ _MMIO(0x9888), 0x2381000a },
+	{ _MMIO(0x9888), 0x1d950550 },
+	{ _MMIO(0x9888), 0x0b928000 },
+	{ _MMIO(0x9888), 0x0d92a000 },
+	{ _MMIO(0x9888), 0x0f922000 },
+	{ _MMIO(0x9888), 0x13900180 },
+	{ _MMIO(0x9888), 0x21900181 },
+	{ _MMIO(0x9888), 0x23900182 },
+	{ _MMIO(0x9888), 0x25900183 },
+	{ _MMIO(0x9888), 0x27900184 },
+	{ _MMIO(0x9888), 0x29900185 },
+	{ _MMIO(0x9888), 0x2b900186 },
+	{ _MMIO(0x9888), 0x2d900187 },
+	{ _MMIO(0x9888), 0x2f900170 },
+	{ _MMIO(0x9888), 0x31900125 },
+	{ _MMIO(0x9888), 0x15900123 },
+	{ _MMIO(0x9888), 0x17900121 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x19908000 },
+	{ _MMIO(0x9888), 0x1b908000 },
+	{ _MMIO(0x9888), 0x1d908000 },
+	{ _MMIO(0x9888), 0x1f908000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43901084 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47901080 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49901084 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b901084 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900004 },
+	{ _MMIO(0x9888), 0x45900000 },
+};
+
+static int
+get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_memory_writes;
+	lens[n] = ARRAY_SIZE(mux_config_memory_writes);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_extended[] = {
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007fc2a },
+	{ _MMIO(0x2774), 0x0000bf00 },
+	{ _MMIO(0x2778), 0x0007fc6a },
+	{ _MMIO(0x277c), 0x0000bf00 },
+	{ _MMIO(0x2780), 0x0007fc92 },
+	{ _MMIO(0x2784), 0x0000bf00 },
+	{ _MMIO(0x2788), 0x0007fca2 },
+	{ _MMIO(0x278c), 0x0000bf00 },
+	{ _MMIO(0x2790), 0x0007fc32 },
+	{ _MMIO(0x2794), 0x0000bf00 },
+	{ _MMIO(0x2798), 0x0007fc9a },
+	{ _MMIO(0x279c), 0x0000bf00 },
+	{ _MMIO(0x27a0), 0x0007fe6a },
+	{ _MMIO(0x27a4), 0x0000bf00 },
+	{ _MMIO(0x27a8), 0x0007fe7a },
+	{ _MMIO(0x27ac), 0x0000bf00 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00778008 },
+	{ _MMIO(0xe45c), 0x00088078 },
+	{ _MMIO(0xe55c), 0x00808708 },
+	{ _MMIO(0xe65c), 0x00a08908 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended[] = {
+	{ _MMIO(0x9888), 0x104f00e0 },
+	{ _MMIO(0x9888), 0x141c0160 },
+	{ _MMIO(0x9888), 0x161c0015 },
+	{ _MMIO(0x9888), 0x181c0120 },
+	{ _MMIO(0x9888), 0x002d5000 },
+	{ _MMIO(0x9888), 0x062d4000 },
+	{ _MMIO(0x9888), 0x082d5000 },
+	{ _MMIO(0x9888), 0x0a2d5000 },
+	{ _MMIO(0x9888), 0x0c2d5000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x0c2e5400 },
+	{ _MMIO(0x9888), 0x0e2e5515 },
+	{ _MMIO(0x9888), 0x102e0155 },
+	{ _MMIO(0x9888), 0x044cc000 },
+	{ _MMIO(0x9888), 0x0a4c8000 },
+	{ _MMIO(0x9888), 0x0c4cc000 },
+	{ _MMIO(0x9888), 0x0e4cc000 },
+	{ _MMIO(0x9888), 0x104c8000 },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x004ea000 },
+	{ _MMIO(0x9888), 0x064e8000 },
+	{ _MMIO(0x9888), 0x084ea000 },
+	{ _MMIO(0x9888), 0x0a4ea000 },
+	{ _MMIO(0x9888), 0x0c4ea000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x0e4f4b41 },
+	{ _MMIO(0x9888), 0x004f4200 },
+	{ _MMIO(0x9888), 0x024f404c },
+	{ _MMIO(0x9888), 0x1c4f0000 },
+	{ _MMIO(0x9888), 0x1a4f0000 },
+	{ _MMIO(0x9888), 0x001b4000 },
+	{ _MMIO(0x9888), 0x061b8000 },
+	{ _MMIO(0x9888), 0x081bc000 },
+	{ _MMIO(0x9888), 0x0a1bc000 },
+	{ _MMIO(0x9888), 0x0c1bc000 },
+	{ _MMIO(0x9888), 0x041bc000 },
+	{ _MMIO(0x9888), 0x001c0031 },
+	{ _MMIO(0x9888), 0x061c1900 },
+	{ _MMIO(0x9888), 0x081c1a33 },
+	{ _MMIO(0x9888), 0x0a1c1b35 },
+	{ _MMIO(0x9888), 0x0c1c3337 },
+	{ _MMIO(0x9888), 0x041c31c7 },
+	{ _MMIO(0x9888), 0x180f5000 },
+	{ _MMIO(0x9888), 0x1a0fa8aa },
+	{ _MMIO(0x9888), 0x1c0f0aaa },
+	{ _MMIO(0x9888), 0x182c8000 },
+	{ _MMIO(0x9888), 0x1c2c6aaa },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x1a2c2950 },
+	{ _MMIO(0x9888), 0x01938000 },
+	{ _MMIO(0x9888), 0x0f938000 },
+	{ _MMIO(0x9888), 0x1993aaaa },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x13904000 },
+	{ _MMIO(0x9888), 0x21904000 },
+	{ _MMIO(0x9888), 0x23904000 },
+	{ _MMIO(0x9888), 0x25904000 },
+	{ _MMIO(0x9888), 0x27904000 },
+	{ _MMIO(0x9888), 0x29904000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900420 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900400 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x45900001 },
+};
+
+static int
+get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
+				const struct i915_oa_reg **regs,
+				int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_extended;
+	lens[n] = ARRAY_SIZE(mux_config_compute_extended);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x30800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2770), 0x0007fffa },
+	{ _MMIO(0x2774), 0x0000fefe },
+	{ _MMIO(0x2778), 0x0007fffa },
+	{ _MMIO(0x277c), 0x0000fefd },
+	{ _MMIO(0x2790), 0x0007fffa },
+	{ _MMIO(0x2794), 0x0000fbef },
+	{ _MMIO(0x2798), 0x0007fffa },
+	{ _MMIO(0x279c), 0x0000fbdf },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00000003 },
+	{ _MMIO(0xe658), 0x00002001 },
+	{ _MMIO(0xe758), 0x00101100 },
+	{ _MMIO(0xe45c), 0x00201200 },
+	{ _MMIO(0xe55c), 0x00301300 },
+	{ _MMIO(0xe65c), 0x00401400 },
+};
+
+static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
+	{ _MMIO(0x9888), 0x166c03b0 },
+	{ _MMIO(0x9888), 0x1593001e },
+	{ _MMIO(0x9888), 0x3f900c00 },
+	{ _MMIO(0x9888), 0x41900000 },
+	{ _MMIO(0x9888), 0x002d1000 },
+	{ _MMIO(0x9888), 0x062d4000 },
+	{ _MMIO(0x9888), 0x082d5000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x0c2e0400 },
+	{ _MMIO(0x9888), 0x0e2e1500 },
+	{ _MMIO(0x9888), 0x102e0140 },
+	{ _MMIO(0x9888), 0x044c4000 },
+	{ _MMIO(0x9888), 0x0a4c8000 },
+	{ _MMIO(0x9888), 0x0c4cc000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x004e2000 },
+	{ _MMIO(0x9888), 0x064e8000 },
+	{ _MMIO(0x9888), 0x084ea000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x1a4f4001 },
+	{ _MMIO(0x9888), 0x1c4f5005 },
+	{ _MMIO(0x9888), 0x006c0051 },
+	{ _MMIO(0x9888), 0x066c5000 },
+	{ _MMIO(0x9888), 0x086c5c5d },
+	{ _MMIO(0x9888), 0x0e6c5e5f },
+	{ _MMIO(0x9888), 0x106c0000 },
+	{ _MMIO(0x9888), 0x146c0000 },
+	{ _MMIO(0x9888), 0x1a6c0000 },
+	{ _MMIO(0x9888), 0x1c6c0000 },
+	{ _MMIO(0x9888), 0x180f1000 },
+	{ _MMIO(0x9888), 0x1a0fa800 },
+	{ _MMIO(0x9888), 0x1c0f0a00 },
+	{ _MMIO(0x9888), 0x182c4000 },
+	{ _MMIO(0x9888), 0x1c2c4015 },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x03931980 },
+	{ _MMIO(0x9888), 0x05930032 },
+	{ _MMIO(0x9888), 0x11930000 },
+	{ _MMIO(0x9888), 0x01938000 },
+	{ _MMIO(0x9888), 0x0f938000 },
+	{ _MMIO(0x9888), 0x1993a00a },
+	{ _MMIO(0x9888), 0x07930000 },
+	{ _MMIO(0x9888), 0x09930000 },
+	{ _MMIO(0x9888), 0x1d900177 },
+	{ _MMIO(0x9888), 0x1f900178 },
+	{ _MMIO(0x9888), 0x35900000 },
+	{ _MMIO(0x9888), 0x13904000 },
+	{ _MMIO(0x9888), 0x21904000 },
+	{ _MMIO(0x9888), 0x23904000 },
+	{ _MMIO(0x9888), 0x25904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x53901000 },
+	{ _MMIO(0x9888), 0x43900000 },
+	{ _MMIO(0x9888), 0x55900111 },
+	{ _MMIO(0x9888), 0x47900001 },
+	{ _MMIO(0x9888), 0x57900000 },
+	{ _MMIO(0x9888), 0x49900000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900000 },
+	{ _MMIO(0x9888), 0x4d900000 },
+	{ _MMIO(0x9888), 0x45900400 },
+};
+
+static int
+get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
+				const struct i915_oa_reg **regs,
+				int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_l3_cache;
+	lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x10800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000fdff },
+};
+
+static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
+	{ _MMIO(0x9888), 0x104f0232 },
+	{ _MMIO(0x9888), 0x124f4640 },
+	{ _MMIO(0x9888), 0x11834400 },
+	{ _MMIO(0x9888), 0x022d4000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x0e2e0055 },
+	{ _MMIO(0x9888), 0x064c8000 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x024e8000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x024f6100 },
+	{ _MMIO(0x9888), 0x044f416b },
+	{ _MMIO(0x9888), 0x064f004b },
+	{ _MMIO(0x9888), 0x1a4f0000 },
+	{ _MMIO(0x9888), 0x1a0f02a8 },
+	{ _MMIO(0x9888), 0x1a2c5500 },
+	{ _MMIO(0x9888), 0x0f808000 },
+	{ _MMIO(0x9888), 0x25810020 },
+	{ _MMIO(0x9888), 0x0f8305c0 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x1f951000 },
+	{ _MMIO(0x9888), 0x13920200 },
+	{ _MMIO(0x9888), 0x31908000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4d900003 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x45900000 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
+			  const struct i915_oa_reg **regs,
+			  int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_hdc_and_sf;
+	lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_l3_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2770), 0x00100070 },
+	{ _MMIO(0x2774), 0x0000fff1 },
+	{ _MMIO(0x2778), 0x00014002 },
+	{ _MMIO(0x277c), 0x0000c3ff },
+	{ _MMIO(0x2780), 0x00010002 },
+	{ _MMIO(0x2784), 0x0000c7ff },
+	{ _MMIO(0x2788), 0x00004002 },
+	{ _MMIO(0x278c), 0x0000d3ff },
+	{ _MMIO(0x2790), 0x00100700 },
+	{ _MMIO(0x2794), 0x0000ff1f },
+	{ _MMIO(0x2798), 0x00001402 },
+	{ _MMIO(0x279c), 0x0000fc3f },
+	{ _MMIO(0x27a0), 0x00001002 },
+	{ _MMIO(0x27a4), 0x0000fc7f },
+	{ _MMIO(0x27a8), 0x00000402 },
+	{ _MMIO(0x27ac), 0x0000fd3f },
+};
+
+static const struct i915_oa_reg flex_eu_config_l3_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_l3_1[] = {
+	{ _MMIO(0x9888), 0x12643400 },
+	{ _MMIO(0x9888), 0x12653400 },
+	{ _MMIO(0x9888), 0x106c6800 },
+	{ _MMIO(0x9888), 0x126c001e },
+	{ _MMIO(0x9888), 0x166c0010 },
+	{ _MMIO(0x9888), 0x0c2d5000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x102e0154 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e0055 },
+	{ _MMIO(0x9888), 0x104c8000 },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x0c4ea000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x1c4f5500 },
+	{ _MMIO(0x9888), 0x1a4f1554 },
+	{ _MMIO(0x9888), 0x0a640024 },
+	{ _MMIO(0x9888), 0x10640000 },
+	{ _MMIO(0x9888), 0x04640000 },
+	{ _MMIO(0x9888), 0x0c650024 },
+	{ _MMIO(0x9888), 0x10650000 },
+	{ _MMIO(0x9888), 0x06650000 },
+	{ _MMIO(0x9888), 0x0c6c5327 },
+	{ _MMIO(0x9888), 0x0e6c5425 },
+	{ _MMIO(0x9888), 0x006c2a00 },
+	{ _MMIO(0x9888), 0x026c285b },
+	{ _MMIO(0x9888), 0x046c005c },
+	{ _MMIO(0x9888), 0x1c6c0000 },
+	{ _MMIO(0x9888), 0x1a6c0900 },
+	{ _MMIO(0x9888), 0x1c0f0aa0 },
+	{ _MMIO(0x9888), 0x180f4000 },
+	{ _MMIO(0x9888), 0x1a0f02aa },
+	{ _MMIO(0x9888), 0x1c2c5400 },
+	{ _MMIO(0x9888), 0x1e2c0001 },
+	{ _MMIO(0x9888), 0x1a2c5550 },
+	{ _MMIO(0x9888), 0x1993aa00 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x2b904000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900421 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900420 },
+	{ _MMIO(0x9888), 0x45900021 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+};
+
+static int
+get_l3_1_mux_config(struct drm_i915_private *dev_priv,
+		    const struct i915_oa_reg **regs,
+		    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_l3_1;
+	lens[n] = ARRAY_SIZE(mux_config_l3_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x30800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x0000efff },
+	{ _MMIO(0x2778), 0x00006000 },
+	{ _MMIO(0x277c), 0x0000f3ff },
+};
+
+static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
+	{ _MMIO(0x9888), 0x102d7800 },
+	{ _MMIO(0x9888), 0x122d79e0 },
+	{ _MMIO(0x9888), 0x0c2f0004 },
+	{ _MMIO(0x9888), 0x100e3800 },
+	{ _MMIO(0x9888), 0x180f0005 },
+	{ _MMIO(0x9888), 0x002d0940 },
+	{ _MMIO(0x9888), 0x022d802f },
+	{ _MMIO(0x9888), 0x042d4013 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x0e2e0050 },
+	{ _MMIO(0x9888), 0x022f0010 },
+	{ _MMIO(0x9888), 0x002f0000 },
+	{ _MMIO(0x9888), 0x084c8000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x044e8000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x040e0480 },
+	{ _MMIO(0x9888), 0x000e0000 },
+	{ _MMIO(0x9888), 0x060f0027 },
+	{ _MMIO(0x9888), 0x100f0000 },
+	{ _MMIO(0x9888), 0x1a0f0040 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x439014a0 },
+	{ _MMIO(0x9888), 0x459000a4 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900001 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
+					    const struct i915_oa_reg **regs,
+					    int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_rasterizer_and_pixel_backend;
+	lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_sampler[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x70800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+	{ _MMIO(0x2770), 0x0000c000 },
+	{ _MMIO(0x2774), 0x0000e7ff },
+	{ _MMIO(0x2778), 0x00003000 },
+	{ _MMIO(0x277c), 0x0000f9ff },
+	{ _MMIO(0x2780), 0x00000c00 },
+	{ _MMIO(0x2784), 0x0000fe7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_sampler[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_sampler[] = {
+	{ _MMIO(0x9888), 0x121300a0 },
+	{ _MMIO(0x9888), 0x141600ab },
+	{ _MMIO(0x9888), 0x123300a0 },
+	{ _MMIO(0x9888), 0x143600ab },
+	{ _MMIO(0x9888), 0x125300a0 },
+	{ _MMIO(0x9888), 0x145600ab },
+	{ _MMIO(0x9888), 0x0c2d4000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x102e01a0 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e0065 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x084c4000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x0e4e8000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x044e2000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x1c0f0800 },
+	{ _MMIO(0x9888), 0x180f4000 },
+	{ _MMIO(0x9888), 0x1a0f023f },
+	{ _MMIO(0x9888), 0x1e2c0003 },
+	{ _MMIO(0x9888), 0x1a2cc030 },
+	{ _MMIO(0x9888), 0x04132180 },
+	{ _MMIO(0x9888), 0x02130000 },
+	{ _MMIO(0x9888), 0x0c148000 },
+	{ _MMIO(0x9888), 0x0e142000 },
+	{ _MMIO(0x9888), 0x04148000 },
+	{ _MMIO(0x9888), 0x1e150140 },
+	{ _MMIO(0x9888), 0x1c150040 },
+	{ _MMIO(0x9888), 0x0c163000 },
+	{ _MMIO(0x9888), 0x0e160068 },
+	{ _MMIO(0x9888), 0x10160000 },
+	{ _MMIO(0x9888), 0x18160000 },
+	{ _MMIO(0x9888), 0x0a164000 },
+	{ _MMIO(0x9888), 0x04330043 },
+	{ _MMIO(0x9888), 0x02330000 },
+	{ _MMIO(0x9888), 0x0234a000 },
+	{ _MMIO(0x9888), 0x04342000 },
+	{ _MMIO(0x9888), 0x1c350015 },
+	{ _MMIO(0x9888), 0x02363460 },
+	{ _MMIO(0x9888), 0x10360000 },
+	{ _MMIO(0x9888), 0x04360000 },
+	{ _MMIO(0x9888), 0x06360000 },
+	{ _MMIO(0x9888), 0x08364000 },
+	{ _MMIO(0x9888), 0x06530043 },
+	{ _MMIO(0x9888), 0x02530000 },
+	{ _MMIO(0x9888), 0x0e548000 },
+	{ _MMIO(0x9888), 0x00548000 },
+	{ _MMIO(0x9888), 0x06542000 },
+	{ _MMIO(0x9888), 0x1e550400 },
+	{ _MMIO(0x9888), 0x1a552000 },
+	{ _MMIO(0x9888), 0x1c550100 },
+	{ _MMIO(0x9888), 0x0e563000 },
+	{ _MMIO(0x9888), 0x00563400 },
+	{ _MMIO(0x9888), 0x10560000 },
+	{ _MMIO(0x9888), 0x18560000 },
+	{ _MMIO(0x9888), 0x02560000 },
+	{ _MMIO(0x9888), 0x0c564000 },
+	{ _MMIO(0x9888), 0x1993a800 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b9014a0 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900001 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900820 },
+	{ _MMIO(0x9888), 0x45901022 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+};
+
+static int
+get_sampler_mux_config(struct drm_i915_private *dev_priv,
+		       const struct i915_oa_reg **regs,
+		       int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_sampler;
+	lens[n] = ARRAY_SIZE(mux_config_sampler);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_tdl_1[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x30800000 },
+	{ _MMIO(0x2770), 0x00000002 },
+	{ _MMIO(0x2774), 0x00007fff },
+	{ _MMIO(0x2778), 0x00000000 },
+	{ _MMIO(0x277c), 0x00009fff },
+	{ _MMIO(0x2780), 0x00000002 },
+	{ _MMIO(0x2784), 0x0000efff },
+	{ _MMIO(0x2788), 0x00000000 },
+	{ _MMIO(0x278c), 0x0000f3ff },
+	{ _MMIO(0x2790), 0x00000002 },
+	{ _MMIO(0x2794), 0x0000fdff },
+	{ _MMIO(0x2798), 0x00000000 },
+	{ _MMIO(0x279c), 0x0000fe7f },
+};
+
+static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_tdl_1[] = {
+	{ _MMIO(0x9888), 0x141a0000 },
+	{ _MMIO(0x9888), 0x143a0000 },
+	{ _MMIO(0x9888), 0x145a0000 },
+	{ _MMIO(0x9888), 0x0c2d4000 },
+	{ _MMIO(0x9888), 0x0e2d5000 },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x102e0150 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e006a },
+	{ _MMIO(0x9888), 0x124c8000 },
+	{ _MMIO(0x9888), 0x144c8000 },
+	{ _MMIO(0x9888), 0x164c2000 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064c4000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x0c4e8000 },
+	{ _MMIO(0x9888), 0x0e4ea000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024e2000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x1c0f0bc0 },
+	{ _MMIO(0x9888), 0x180f4000 },
+	{ _MMIO(0x9888), 0x1a0f0302 },
+	{ _MMIO(0x9888), 0x1e2c0003 },
+	{ _MMIO(0x9888), 0x1a2c00f0 },
+	{ _MMIO(0x9888), 0x021a3080 },
+	{ _MMIO(0x9888), 0x041a31e5 },
+	{ _MMIO(0x9888), 0x02148000 },
+	{ _MMIO(0x9888), 0x0414a000 },
+	{ _MMIO(0x9888), 0x1c150054 },
+	{ _MMIO(0x9888), 0x06168000 },
+	{ _MMIO(0x9888), 0x08168000 },
+	{ _MMIO(0x9888), 0x0a168000 },
+	{ _MMIO(0x9888), 0x0c3a3280 },
+	{ _MMIO(0x9888), 0x0e3a0063 },
+	{ _MMIO(0x9888), 0x063a0061 },
+	{ _MMIO(0x9888), 0x023a0000 },
+	{ _MMIO(0x9888), 0x0c348000 },
+	{ _MMIO(0x9888), 0x0e342000 },
+	{ _MMIO(0x9888), 0x06342000 },
+	{ _MMIO(0x9888), 0x1e350140 },
+	{ _MMIO(0x9888), 0x1c350100 },
+	{ _MMIO(0x9888), 0x18360028 },
+	{ _MMIO(0x9888), 0x0c368000 },
+	{ _MMIO(0x9888), 0x0e5a3080 },
+	{ _MMIO(0x9888), 0x005a3280 },
+	{ _MMIO(0x9888), 0x025a0063 },
+	{ _MMIO(0x9888), 0x0e548000 },
+	{ _MMIO(0x9888), 0x00548000 },
+	{ _MMIO(0x9888), 0x02542000 },
+	{ _MMIO(0x9888), 0x1e550400 },
+	{ _MMIO(0x9888), 0x1a552000 },
+	{ _MMIO(0x9888), 0x1c550001 },
+	{ _MMIO(0x9888), 0x18560080 },
+	{ _MMIO(0x9888), 0x02568000 },
+	{ _MMIO(0x9888), 0x04568000 },
+	{ _MMIO(0x9888), 0x1993a800 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x2d904000 },
+	{ _MMIO(0x9888), 0x2f904000 },
+	{ _MMIO(0x9888), 0x31904000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x59900000 },
+	{ _MMIO(0x9888), 0x4b900420 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+	{ _MMIO(0x9888), 0x4d900000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900000 },
+	{ _MMIO(0x9888), 0x45901084 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900001 },
+};
+
+static int
+get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
+		     const struct i915_oa_reg **regs,
+		     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_tdl_1;
+	lens[n] = ARRAY_SIZE(mux_config_tdl_1);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_tdl_2[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
+	{ _MMIO(0xe458), 0x00005004 },
+	{ _MMIO(0xe558), 0x00010003 },
+	{ _MMIO(0xe658), 0x00012011 },
+	{ _MMIO(0xe758), 0x00015014 },
+	{ _MMIO(0xe45c), 0x00051050 },
+	{ _MMIO(0xe55c), 0x00053052 },
+	{ _MMIO(0xe65c), 0x00055054 },
+};
+
+static const struct i915_oa_reg mux_config_tdl_2[] = {
+	{ _MMIO(0x9888), 0x141a026b },
+	{ _MMIO(0x9888), 0x143a0173 },
+	{ _MMIO(0x9888), 0x145a026b },
+	{ _MMIO(0x9888), 0x002d4000 },
+	{ _MMIO(0x9888), 0x022d5000 },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x0c2e5000 },
+	{ _MMIO(0x9888), 0x0e2e0069 },
+	{ _MMIO(0x9888), 0x044c8000 },
+	{ _MMIO(0x9888), 0x064cc000 },
+	{ _MMIO(0x9888), 0x0a4c4000 },
+	{ _MMIO(0x9888), 0x004e8000 },
+	{ _MMIO(0x9888), 0x024ea000 },
+	{ _MMIO(0x9888), 0x064e2000 },
+	{ _MMIO(0x9888), 0x180f6000 },
+	{ _MMIO(0x9888), 0x1a0f030a },
+	{ _MMIO(0x9888), 0x1a2c03c0 },
+	{ _MMIO(0x9888), 0x041a37e7 },
+	{ _MMIO(0x9888), 0x021a0000 },
+	{ _MMIO(0x9888), 0x0414a000 },
+	{ _MMIO(0x9888), 0x1c150050 },
+	{ _MMIO(0x9888), 0x08168000 },
+	{ _MMIO(0x9888), 0x0a168000 },
+	{ _MMIO(0x9888), 0x003a3380 },
+	{ _MMIO(0x9888), 0x063a006f },
+	{ _MMIO(0x9888), 0x023a0000 },
+	{ _MMIO(0x9888), 0x00348000 },
+	{ _MMIO(0x9888), 0x06342000 },
+	{ _MMIO(0x9888), 0x1a352000 },
+	{ _MMIO(0x9888), 0x1c350100 },
+	{ _MMIO(0x9888), 0x02368000 },
+	{ _MMIO(0x9888), 0x0c368000 },
+	{ _MMIO(0x9888), 0x025a37e7 },
+	{ _MMIO(0x9888), 0x0254a000 },
+	{ _MMIO(0x9888), 0x1c550005 },
+	{ _MMIO(0x9888), 0x04568000 },
+	{ _MMIO(0x9888), 0x06568000 },
+	{ _MMIO(0x9888), 0x03938000 },
+	{ _MMIO(0x9888), 0x05938000 },
+	{ _MMIO(0x9888), 0x07938000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x15904000 },
+	{ _MMIO(0x9888), 0x17904000 },
+	{ _MMIO(0x9888), 0x19904000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x53900000 },
+	{ _MMIO(0x9888), 0x43900020 },
+	{ _MMIO(0x9888), 0x45901080 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900001 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
+		     const struct i915_oa_reg **regs,
+		     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_tdl_2;
+	lens[n] = ARRAY_SIZE(mux_config_tdl_2);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_extra[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2714), 0x00800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
+	{ _MMIO(0xe458), 0x00001000 },
+	{ _MMIO(0xe558), 0x00003002 },
+	{ _MMIO(0xe658), 0x00005004 },
+	{ _MMIO(0xe758), 0x00011010 },
+	{ _MMIO(0xe45c), 0x00050012 },
+	{ _MMIO(0xe55c), 0x00052051 },
+	{ _MMIO(0xe65c), 0x00000008 },
+};
+
+static const struct i915_oa_reg mux_config_compute_extra[] = {
+	{ _MMIO(0x9888), 0x141a001f },
+	{ _MMIO(0x9888), 0x143a001f },
+	{ _MMIO(0x9888), 0x145a001f },
+	{ _MMIO(0x9888), 0x042d5000 },
+	{ _MMIO(0x9888), 0x062d1000 },
+	{ _MMIO(0x9888), 0x0e2e0094 },
+	{ _MMIO(0x9888), 0x084cc000 },
+	{ _MMIO(0x9888), 0x044ea000 },
+	{ _MMIO(0x9888), 0x1a0f00e0 },
+	{ _MMIO(0x9888), 0x1a2c0c00 },
+	{ _MMIO(0x9888), 0x061a0063 },
+	{ _MMIO(0x9888), 0x021a0000 },
+	{ _MMIO(0x9888), 0x06142000 },
+	{ _MMIO(0x9888), 0x1c150100 },
+	{ _MMIO(0x9888), 0x0c168000 },
+	{ _MMIO(0x9888), 0x043a3180 },
+	{ _MMIO(0x9888), 0x023a0000 },
+	{ _MMIO(0x9888), 0x04348000 },
+	{ _MMIO(0x9888), 0x1c350040 },
+	{ _MMIO(0x9888), 0x0a368000 },
+	{ _MMIO(0x9888), 0x045a0063 },
+	{ _MMIO(0x9888), 0x025a0000 },
+	{ _MMIO(0x9888), 0x04542000 },
+	{ _MMIO(0x9888), 0x1c550010 },
+	{ _MMIO(0x9888), 0x08568000 },
+	{ _MMIO(0x9888), 0x09938000 },
+	{ _MMIO(0x9888), 0x0b938000 },
+	{ _MMIO(0x9888), 0x0d938000 },
+	{ _MMIO(0x9888), 0x1b904000 },
+	{ _MMIO(0x9888), 0x1d904000 },
+	{ _MMIO(0x9888), 0x1f904000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x45900400 },
+	{ _MMIO(0x9888), 0x47900004 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
+			     const struct i915_oa_reg **regs,
+			     int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_compute_extra;
+	lens[n] = ARRAY_SIZE(mux_config_compute_extra);
+	n++;
+
+	return n;
+}
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+	{ _MMIO(0x2740), 0x00000000 },
+	{ _MMIO(0x2744), 0x00800000 },
+	{ _MMIO(0x2714), 0xf0800000 },
+	{ _MMIO(0x2710), 0x00000000 },
+	{ _MMIO(0x2724), 0xf0800000 },
+	{ _MMIO(0x2720), 0x00000000 },
+	{ _MMIO(0x2770), 0x00000004 },
+	{ _MMIO(0x2774), 0x00000000 },
+	{ _MMIO(0x2778), 0x00000003 },
+	{ _MMIO(0x277c), 0x00000000 },
+	{ _MMIO(0x2780), 0x00000007 },
+	{ _MMIO(0x2784), 0x00000000 },
+	{ _MMIO(0x2788), 0x00100002 },
+	{ _MMIO(0x278c), 0x0000fff7 },
+	{ _MMIO(0x2790), 0x00100002 },
+	{ _MMIO(0x2794), 0x0000ffcf },
+	{ _MMIO(0x2798), 0x00100082 },
+	{ _MMIO(0x279c), 0x0000ffef },
+	{ _MMIO(0x27a0), 0x001000c2 },
+	{ _MMIO(0x27a4), 0x0000ffe7 },
+	{ _MMIO(0x27a8), 0x00100001 },
+	{ _MMIO(0x27ac), 0x0000ffe7 },
+};
+
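+/* The TEST_OA set has no flex-EU counter configuration; this empty list
+ * makes the ARRAY_SIZE() taken of it below evaluate to zero.
+ */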
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+	{ _MMIO(0x9888), 0x19800000 },
+	{ _MMIO(0x9888), 0x07800063 },
+	{ _MMIO(0x9888), 0x11800000 },
+	{ _MMIO(0x9888), 0x23810008 },
+	{ _MMIO(0x9888), 0x1d950400 },
+	{ _MMIO(0x9888), 0x0f922000 },
+	{ _MMIO(0x9888), 0x1f908000 },
+	{ _MMIO(0x9888), 0x37900000 },
+	{ _MMIO(0x9888), 0x55900000 },
+	{ _MMIO(0x9888), 0x47900000 },
+	{ _MMIO(0x9888), 0x33900000 },
+};
+
+static int
+get_test_oa_mux_config(struct drm_i915_private *dev_priv,
+		       const struct i915_oa_reg **regs,
+		       int *lens)
+{
+	int n = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
+	BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
+
+	regs[n] = mux_config_test_oa;
+	lens[n] = ARRAY_SIZE(mux_config_test_oa);
+	n++;
+
+	return n;
+}
+
+int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv)
+{
+	dev_priv->perf.oa.n_mux_configs = 0;
+	dev_priv->perf.oa.b_counter_regs = NULL;
+	dev_priv->perf.oa.b_counter_regs_len = 0;
+	dev_priv->perf.oa.flex_regs = NULL;
+	dev_priv->perf.oa.flex_regs_len = 0;
+
+	switch (dev_priv->perf.oa.metrics_set) {
+	case METRIC_SET_ID_RENDER_BASIC:
+		dev_priv->perf.oa.n_mux_configs =
+			get_render_basic_mux_config(dev_priv,
+						    dev_priv->perf.oa.mux_regs,
+						    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_render_basic;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_render_basic);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_render_basic;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_render_basic);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_BASIC:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_basic_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_basic;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_basic);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_basic;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_basic);
+
+		return 0;
+	case METRIC_SET_ID_RENDER_PIPE_PROFILE:
+		dev_priv->perf.oa.n_mux_configs =
+			get_render_pipe_profile_mux_config(dev_priv,
+							   dev_priv->perf.oa.mux_regs,
+							   dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_render_pipe_profile;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_render_pipe_profile);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_render_pipe_profile;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_render_pipe_profile);
+
+		return 0;
+	case METRIC_SET_ID_MEMORY_READS:
+		dev_priv->perf.oa.n_mux_configs =
+			get_memory_reads_mux_config(dev_priv,
+						    dev_priv->perf.oa.mux_regs,
+						    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_memory_reads;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_memory_reads);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_memory_reads;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_memory_reads);
+
+		return 0;
+	case METRIC_SET_ID_MEMORY_WRITES:
+		dev_priv->perf.oa.n_mux_configs =
+			get_memory_writes_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_memory_writes;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_memory_writes);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_memory_writes;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_memory_writes);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_EXTENDED:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_extended_mux_config(dev_priv,
+							dev_priv->perf.oa.mux_regs,
+							dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_extended;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_extended);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_extended;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_extended);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_L3_CACHE:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_l3_cache_mux_config(dev_priv,
+							dev_priv->perf.oa.mux_regs,
+							dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_l3_cache;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_l3_cache);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_l3_cache;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_l3_cache);
+
+		return 0;
+	case METRIC_SET_ID_HDC_AND_SF:
+		dev_priv->perf.oa.n_mux_configs =
+			get_hdc_and_sf_mux_config(dev_priv,
+						  dev_priv->perf.oa.mux_regs,
+						  dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_hdc_and_sf;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_hdc_and_sf);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_hdc_and_sf;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_hdc_and_sf);
+
+		return 0;
+	case METRIC_SET_ID_L3_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_l3_1_mux_config(dev_priv,
+					    dev_priv->perf.oa.mux_regs,
+					    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_l3_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_l3_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_l3_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_l3_1);
+
+		return 0;
+	case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
+		dev_priv->perf.oa.n_mux_configs =
+			get_rasterizer_and_pixel_backend_mux_config(dev_priv,
+								    dev_priv->perf.oa.mux_regs,
+								    dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_rasterizer_and_pixel_backend;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_rasterizer_and_pixel_backend;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
+
+		return 0;
+	case METRIC_SET_ID_SAMPLER:
+		dev_priv->perf.oa.n_mux_configs =
+			get_sampler_mux_config(dev_priv,
+					       dev_priv->perf.oa.mux_regs,
+					       dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_sampler;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_sampler);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_sampler;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_sampler);
+
+		return 0;
+	case METRIC_SET_ID_TDL_1:
+		dev_priv->perf.oa.n_mux_configs =
+			get_tdl_1_mux_config(dev_priv,
+					     dev_priv->perf.oa.mux_regs,
+					     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_tdl_1;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_tdl_1);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_tdl_1;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_tdl_1);
+
+		return 0;
+	case METRIC_SET_ID_TDL_2:
+		dev_priv->perf.oa.n_mux_configs =
+			get_tdl_2_mux_config(dev_priv,
+					     dev_priv->perf.oa.mux_regs,
+					     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_tdl_2;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_tdl_2);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_tdl_2;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_tdl_2);
+
+		return 0;
+	case METRIC_SET_ID_COMPUTE_EXTRA:
+		dev_priv->perf.oa.n_mux_configs =
+			get_compute_extra_mux_config(dev_priv,
+						     dev_priv->perf.oa.mux_regs,
+						     dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_compute_extra;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_compute_extra);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_compute_extra;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_compute_extra);
+
+		return 0;
+	case METRIC_SET_ID_TEST_OA:
+		dev_priv->perf.oa.n_mux_configs =
+			get_test_oa_mux_config(dev_priv,
+					       dev_priv->perf.oa.mux_regs,
+					       dev_priv->perf.oa.mux_regs_lens);
+		if (dev_priv->perf.oa.n_mux_configs == 0) {
+			DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
+
+			/* EINVAL because *_register_sysfs already checked this
+			 * config, so it was never advertised to userspace and
+			 * should not have been requested.
+			 */
+			return -EINVAL;
+		}
+
+		dev_priv->perf.oa.b_counter_regs =
+			b_counter_config_test_oa;
+		dev_priv->perf.oa.b_counter_regs_len =
+			ARRAY_SIZE(b_counter_config_test_oa);
+
+		dev_priv->perf.oa.flex_regs =
+			flex_eu_config_test_oa;
+		dev_priv->perf.oa.flex_regs_len =
+			ARRAY_SIZE(flex_eu_config_test_oa);
+
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
+static ssize_t
+show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
+}
+
+static struct device_attribute dev_attr_render_basic_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_render_basic_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_render_basic[] = {
+	&dev_attr_render_basic_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_render_basic = {
+	.name = "d72df5c7-5b4a-4274-a43f-00b0fd51fc68",
+	.attrs =  attrs_render_basic,
+};
+
+static ssize_t
+show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
+}
+
+static struct device_attribute dev_attr_compute_basic_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_basic_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_basic[] = {
+	&dev_attr_compute_basic_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_basic = {
+	.name = "814285f6-354d-41d2-ba49-e24e622714a0",
+	.attrs =  attrs_compute_basic,
+};
+
+static ssize_t
+show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
+}
+
+static struct device_attribute dev_attr_render_pipe_profile_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_render_pipe_profile_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_render_pipe_profile[] = {
+	&dev_attr_render_pipe_profile_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_render_pipe_profile = {
+	.name = "07d397a6-b3e6-49f6-9433-a4f293d55978",
+	.attrs =  attrs_render_pipe_profile,
+};
+
+static ssize_t
+show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
+}
+
+static struct device_attribute dev_attr_memory_reads_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_memory_reads_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_memory_reads[] = {
+	&dev_attr_memory_reads_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_memory_reads = {
+	.name = "1a356946-5428-450b-a2f0-89f8783a302d",
+	.attrs =  attrs_memory_reads,
+};
+
+static ssize_t
+show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
+}
+
+static struct device_attribute dev_attr_memory_writes_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_memory_writes_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_memory_writes[] = {
+	&dev_attr_memory_writes_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_memory_writes = {
+	.name = "5299be9d-7a61-4c99-9f81-f87e6c5aaca9",
+	.attrs =  attrs_memory_writes,
+};
+
+static ssize_t
+show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
+}
+
+static struct device_attribute dev_attr_compute_extended_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_extended_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_extended[] = {
+	&dev_attr_compute_extended_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_extended = {
+	.name = "bc9bcff2-459a-4cbc-986d-a84b077153f3",
+	.attrs =  attrs_compute_extended,
+};
+
+static ssize_t
+show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
+}
+
+static struct device_attribute dev_attr_compute_l3_cache_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_l3_cache_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_l3_cache[] = {
+	&dev_attr_compute_l3_cache_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_l3_cache = {
+	.name = "88ec931f-5b4a-453a-9db6-a61232b6143d",
+	.attrs =  attrs_compute_l3_cache,
+};
+
+static ssize_t
+show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
+}
+
+static struct device_attribute dev_attr_hdc_and_sf_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_hdc_and_sf_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_hdc_and_sf[] = {
+	&dev_attr_hdc_and_sf_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_hdc_and_sf = {
+	.name = "530d176d-2a18-4014-adf8-1500c6c60835",
+	.attrs =  attrs_hdc_and_sf,
+};
+
+static ssize_t
+show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
+}
+
+static struct device_attribute dev_attr_l3_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_l3_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_l3_1[] = {
+	&dev_attr_l3_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_l3_1 = {
+	.name = "fdee5a5a-f23c-43d1-aa73-f6257c71671d",
+	.attrs =  attrs_l3_1,
+};
+
+static ssize_t
+show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
+}
+
+static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_rasterizer_and_pixel_backend_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
+	&dev_attr_rasterizer_and_pixel_backend_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_rasterizer_and_pixel_backend = {
+	.name = "6617623e-ca73-4791-b2b7-ddedd0846a0c",
+	.attrs =  attrs_rasterizer_and_pixel_backend,
+};
+
+static ssize_t
+show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
+}
+
+static struct device_attribute dev_attr_sampler_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_sampler_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_sampler[] = {
+	&dev_attr_sampler_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_sampler = {
+	.name = "f3b2ea63-e82e-4234-b418-44dd20dd34d0",
+	.attrs =  attrs_sampler,
+};
+
+static ssize_t
+show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
+}
+
+static struct device_attribute dev_attr_tdl_1_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_tdl_1_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_tdl_1[] = {
+	&dev_attr_tdl_1_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_tdl_1 = {
+	.name = "14411d35-cbf6-4f5e-b68b-190faf9a1a83",
+	.attrs =  attrs_tdl_1,
+};
+
+static ssize_t
+show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
+}
+
+static struct device_attribute dev_attr_tdl_2_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_tdl_2_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_tdl_2[] = {
+	&dev_attr_tdl_2_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_tdl_2 = {
+	.name = "ffa3f263-0478-4724-8c9f-c911c5ec0f1d",
+	.attrs =  attrs_tdl_2,
+};
+
+static ssize_t
+show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
+}
+
+static struct device_attribute dev_attr_compute_extra_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_compute_extra_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_compute_extra[] = {
+	&dev_attr_compute_extra_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_compute_extra = {
+	.name = "15274c82-27d2-4819-876a-7cb1a2c59ba4",
+	.attrs =  attrs_compute_extra,
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+}
+
+static struct device_attribute dev_attr_test_oa_id = {
+	.attr = { .name = "id", .mode = 0444 },
+	.show = show_test_oa_id,
+	.store = NULL,
+};
+
+static struct attribute *attrs_test_oa[] = {
+	&dev_attr_test_oa_id.attr,
+	NULL,
+};
+
+static struct attribute_group group_test_oa = {
+	.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf",
+	.attrs =  attrs_test_oa,
+};
+
+int
+i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv)
+{
+	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
+	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+	int ret = 0;
+
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+		if (ret)
+			goto error_render_basic;
+	}
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+		if (ret)
+			goto error_compute_basic;
+	}
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+		if (ret)
+			goto error_render_pipe_profile;
+	}
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+		if (ret)
+			goto error_memory_reads;
+	}
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+		if (ret)
+			goto error_memory_writes;
+	}
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+		if (ret)
+			goto error_compute_extended;
+	}
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+		if (ret)
+			goto error_compute_l3_cache;
+	}
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+		if (ret)
+			goto error_hdc_and_sf;
+	}
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+		if (ret)
+			goto error_l3_1;
+	}
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+		if (ret)
+			goto error_rasterizer_and_pixel_backend;
+	}
+	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
+		if (ret)
+			goto error_sampler;
+	}
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+		if (ret)
+			goto error_tdl_1;
+	}
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+		if (ret)
+			goto error_tdl_2;
+	}
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+		if (ret)
+			goto error_compute_extra;
+	}
+	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
+		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+		if (ret)
+			goto error_test_oa;
+	}
+
+	return 0;
+
+error_test_oa:
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+error_compute_extra:
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+error_tdl_2:
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+error_tdl_1:
+	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
+error_sampler:
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+error_rasterizer_and_pixel_backend:
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+error_l3_1:
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+error_hdc_and_sf:
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+error_compute_l3_cache:
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+error_compute_extended:
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+error_memory_writes:
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+error_memory_reads:
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+error_render_pipe_profile:
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+error_compute_basic:
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+error_render_basic:
+	return ret;
+}
+
+void
+i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv)
+{
+	const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
+	int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+
+	if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+	if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+	if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
+	if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+	if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+	if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+	if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
+	if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
+	if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
+	if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
+	if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
+	if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
+	if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
+	if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
+	if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
+		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+}

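For context: each attribute_group registered above shows up in sysfs as /sys/class/drm/cardN/metrics/<uuid>/, and its "id" file holds the value userspace passes as DRM_I915_PERF_PROP_OA_METRICS_SET when opening a perf stream. The following is a minimal, hypothetical userspace sketch, not part of this patch: it assumes the i915 perf uAPI from <drm/i915_drm.h>, hard-codes card0 and the render_basic UUID from this file, picks an arbitrary OA exponent, and elides most error handling (opening the stream typically requires root).

/* Hypothetical sketch: open an i915 perf stream for the GLK
 * "render_basic" metric set registered above.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
	/* Created by i915_perf_register_sysfs_glk(); the directory name
	 * is the attribute_group name (a UUID). */
	FILE *f = fopen("/sys/class/drm/card0/metrics/"
			"d72df5c7-5b4a-4274-a43f-00b0fd51fc68/id", "r");
	uint64_t metrics_set = 0;

	if (!f || fscanf(f, "%" SCNu64, &metrics_set) != 1)
		return 1;
	fclose(f);

	/* (property, value) pairs; num_properties counts pairs. */
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,	/* arbitrary period */
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uint64_t)(uintptr_t)properties,
	};

	int drm_fd = open("/dev/dri/card0", O_RDWR);
	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);

	if (stream_fd >= 0) {
		/* read()/poll() on stream_fd now yields OA report samples. */
		printf("perf stream fd: %d\n", stream_fd);
		close(stream_fd);
	}
	close(drm_fd);
	return stream_fd >= 0 ? 0 : 1;
}
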
+ 40 - 0
drivers/gpu/drm/i915/i915_oa_glk.h

@@ -0,0 +1,40 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_GLK_H__
+#define __I915_OA_GLK_H__
+
+extern int i915_oa_n_builtin_metric_sets_glk;
+
+extern int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv);
+
+extern int i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv);
+
+extern void i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv);
+
+#endif

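The hunk that actually teaches i915 to use these tables on Geminilake lives in i915_perf.c, which is among the files omitted from this truncated diff. As a rough sketch only (the perf.oa field names here are inferred from the existing per-platform OA code, not quoted from the omitted hunk), the platform dispatch plausibly looks like:

/* Illustrative sketch, not the actual omitted i915_perf.c hunk;
 * n_builtin_sets and ops.select_metric_set are inferred field names. */
static void i915_perf_init_oa_glk(struct drm_i915_private *dev_priv)
{
	if (IS_GEMINILAKE(dev_priv)) {
		dev_priv->perf.oa.n_builtin_sets =
			i915_oa_n_builtin_metric_sets_glk;
		dev_priv->perf.oa.ops.select_metric_set =
			i915_oa_select_metric_set_glk;
	}
}

i915_perf_register_sysfs_glk() and i915_perf_unregister_sysfs_glk() would presumably be dispatched the same way when the metrics kobject is created and torn down.
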
Some files were not shown because too many files changed in this diff