
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David Ahern's dump indexing bug fix in 'net' overlapped the
change of the function signature of inet6_fill_ifaddr() in
'net-next'.  Trivially resolved.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 6 years ago
Commit 21ea1d36f6
42 changed files with 308 additions and 139 deletions
  1. arch/x86/boot/compressed/Makefile (+1 -0)
  2. arch/x86/entry/entry_32.S (+7 -6)
  3. arch/x86/entry/entry_64.S (+13 -0)
  4. arch/x86/include/asm/fpu/internal.h (+1 -1)
  5. arch/x86/include/asm/percpu.h (+4 -4)
  6. arch/x86/kernel/fpu/signal.c (+0 -1)
  7. arch/x86/kernel/pci-swiotlb.c (+0 -2)
  8. arch/x86/kernel/time.c (+1 -1)
  9. arch/x86/kernel/tsc.c (+3 -3)
  10. block/blk-lib.c (+2 -26)
  11. drivers/gpu/drm/drm_atomic.c (+5 -0)
  12. drivers/gpu/drm/drm_atomic_helper.c (+8 -4)
  13. drivers/gpu/drm/sun4i/sun4i_dotclock.c (+11 -1)
  14. drivers/input/mouse/elan_i2c_core.c (+1 -0)
  15. drivers/nvme/host/core.c (+1 -1)
  16. drivers/usb/class/cdc-acm.c (+8 -8)
  17. drivers/usb/core/devio.c (+2 -2)
  18. drivers/usb/gadget/function/f_mass_storage.c (+3 -0)
  19. drivers/usb/host/xhci-pci.c (+4 -2)
  20. drivers/usb/roles/intel-xhci-usb-role-switch.c (+2 -0)
  21. drivers/usb/usbip/vhci_hcd.c (+42 -15)
  22. include/drm/drm_atomic.h (+11 -0)
  23. kernel/sched/fair.c (+20 -4)
  24. kernel/sched/sched.h (+2 -0)
  25. kernel/trace/trace_events_hist.c (+25 -7)
  26. net/core/neighbour.c (+1 -2)
  27. net/core/skbuff.c (+3 -2)
  28. net/ipv6/addrconf.c (+4 -2)
  29. tools/arch/x86/include/uapi/asm/kvm.h (+1 -0)
  30. tools/include/uapi/linux/kvm.h (+1 -0)
  31. tools/lib/api/fs/tracing_path.c (+2 -2)
  32. tools/perf/Makefile.config (+1 -1)
  33. tools/perf/Makefile.perf (+1 -1)
  34. tools/perf/builtin-report.c (+1 -0)
  35. tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json (+8 -8)
  36. tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json (+8 -8)
  37. tools/perf/util/event.c (+3 -19)
  38. tools/perf/util/evsel.c (+3 -0)
  39. tools/perf/util/pmu.c (+7 -6)
  40. tools/perf/util/srcline.c (+3 -0)
  41. tools/testing/selftests/drivers/usb/usbip/usbip_test.sh (+4 -0)
  42. tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc (+80 -0)

+ 1 - 0
arch/x86/boot/compressed/Makefile

@@ -37,6 +37,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += -Wno-pointer-sign
 
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n

+ 7 - 6
arch/x86/entry/entry_32.S

@@ -389,6 +389,13 @@
 	 * that register for the time this macro runs
 	 */
 
+	/*
+	 * The high bits of the CS dword (__csh) are used for
+	 * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
+	 * hardware didn't do this for us.
+	 */
+	andl	$(0x0000ffff), PT_CS(%esp)
+
 	/* Are we on the entry stack? Bail out if not! */
 	movl	PER_CPU_VAR(cpu_entry_area), %ecx
 	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
@@ -407,12 +414,6 @@
 	/* Load top of task-stack into %edi */
 	movl	TSS_entry2task_stack(%edi), %edi
 
-	/*
-	 * Clear unused upper bits of the dword containing the word-sized CS
-	 * slot in pt_regs in case hardware didn't clear it for us.
-	 */
-	andl	$(0x0000ffff), PT_CS(%esp)
-
 	/* Special case - entry from kernel mode via entry stack */
 #ifdef CONFIG_VM86
 	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS

+ 13 - 0
arch/x86/entry/entry_64.S

@@ -1187,6 +1187,16 @@ ENTRY(paranoid_entry)
 	xorl	%ebx, %ebx
 
 1:
+	/*
+	 * Always stash CR3 in %r14.  This value will be restored,
+	 * verbatim, at exit.  Needed if paranoid_entry interrupted
+	 * another entry that already switched to the user CR3 value
+	 * but has not yet returned to userspace.
+	 *
+	 * This is also why CS (stashed in the "iret frame" by the
+	 * hardware at entry) can not be used: this may be a return
+	 * to kernel code, but with a user CR3 value.
+	 */
 	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
 	ret
@@ -1211,11 +1221,13 @@ ENTRY(paranoid_exit)
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	.Lparanoid_exit_no_swapgs
 	TRACE_IRQS_IRETQ
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 	SWAPGS_UNSAFE_STACK
 	jmp	.Lparanoid_exit_restore
 .Lparanoid_exit_no_swapgs:
 	TRACE_IRQS_IRETQ_DEBUG
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
 	jmp restore_regs_and_return_to_kernel
@@ -1626,6 +1638,7 @@ end_repeat_nmi:
 	movq	$-1, %rsi
 	call	do_nmi
 
+	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
 
 	testl	%ebx, %ebx			/* swapgs needed? */

+ 1 - 1
arch/x86/include/asm/fpu/internal.h

@@ -528,7 +528,7 @@ static inline void fpregs_activate(struct fpu *fpu)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (old_fpu->initialized) {
+	if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else

+ 4 - 4
arch/x86/include/asm/percpu.h

@@ -185,22 +185,22 @@ do {									\
 	typeof(var) pfo_ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
-		asm(op "b "__percpu_arg(1)",%0"		\
+		asm volatile(op "b "__percpu_arg(1)",%0"\
 		    : "=q" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 2:						\
-		asm(op "w "__percpu_arg(1)",%0"		\
+		asm volatile(op "w "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 4:						\
-		asm(op "l "__percpu_arg(1)",%0"		\
+		asm volatile(op "l "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\
 	case 8:						\
-		asm(op "q "__percpu_arg(1)",%0"		\
+		asm volatile(op "q "__percpu_arg(1)",%0"\
 		    : "=r" (pfo_ret__)			\
 		    : "m" (var));			\
 		break;					\

+ 0 - 1
arch/x86/kernel/fpu/signal.c

@@ -314,7 +314,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		 * thread's fpu state, reconstruct fxstate from the fsave
 		 * header. Validate and sanitize the copied state.
 		 */
-		struct fpu *fpu = &tsk->thread.fpu;
 		struct user_i387_ia32_struct env;
 		int err = 0;
 

+ 0 - 2
arch/x86/kernel/pci-swiotlb.c

@@ -42,10 +42,8 @@ IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
 int __init pci_swiotlb_detect_4gb(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-#ifdef CONFIG_X86_64
 	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
 		swiotlb = 1;
-#endif
 
 	/*
 	 * If SME is active then swiotlb will be set to 1 so that bounce

+ 1 - 1
arch/x86/kernel/time.c

@@ -25,7 +25,7 @@
 #include <asm/time.h>
 
 #ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 #endif
 
 unsigned long profile_pc(struct pt_regs *regs)

+ 3 - 3
arch/x86/kernel/tsc.c

@@ -58,7 +58,7 @@ struct cyc2ns {
 
 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
 
-void cyc2ns_read_begin(struct cyc2ns_data *data)
+void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
 {
 	int seq, idx;
 
@@ -75,7 +75,7 @@ void cyc2ns_read_begin(struct cyc2ns_data *data)
 	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
 }
 
-void cyc2ns_read_end(void)
+void __always_inline cyc2ns_read_end(void)
 {
 	preempt_enable_notrace();
 }
@@ -104,7 +104,7 @@ void cyc2ns_read_end(void)
  *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
 
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
 	struct cyc2ns_data data;
 	unsigned long long ns;

+ 2 - 26
block/blk-lib.c

@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
-	unsigned int granularity;
 	unsigned int op;
-	int alignment;
 	sector_t bs_mask;
 
 	if (!q)
@@ -54,38 +52,16 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same.  */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
 	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
+		unsigned int req_sects = nr_sects;
+		sector_t end_sect;
 
-		/*
-		 * Issue in chunks of the user defined max discard setting,
-		 * ensuring that bi_size doesn't overflow
-		 */
-		req_sects = min_t(sector_t, nr_sects,
-					q->limits.max_discard_sectors);
 		if (!req_sects)
 			goto fail;
 		if (req_sects > UINT_MAX >> 9)
 			req_sects = UINT_MAX >> 9;
 
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
 		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
 
 		bio = next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;

+ 5 - 0
drivers/gpu/drm/drm_atomic.c

@@ -174,6 +174,11 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
 		state->crtcs[i].state = NULL;
 		state->crtcs[i].old_state = NULL;
 		state->crtcs[i].new_state = NULL;
+
+		if (state->crtcs[i].commit) {
+			drm_crtc_commit_put(state->crtcs[i].commit);
+			state->crtcs[i].commit = NULL;
+		}
 	}
 
 	for (i = 0; i < config->num_total_plane; i++) {

+ 8 - 4
drivers/gpu/drm/drm_atomic_helper.c

@@ -1408,15 +1408,16 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
 void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
 					  struct drm_atomic_state *old_state)
 {
-	struct drm_crtc_state *new_crtc_state;
 	struct drm_crtc *crtc;
 	int i;
 
-	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
-		struct drm_crtc_commit *commit = new_crtc_state->commit;
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
 		int ret;
 
-		if (!commit)
+		crtc = old_state->crtcs[i].ptr;
+
+		if (!crtc || !commit)
 			continue;
 
 		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
@@ -1934,6 +1935,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
 		drm_crtc_commit_get(commit);
 
 		commit->abort_completion = true;
+
+		state->crtcs[i].commit = commit;
+		drm_crtc_commit_get(commit);
 	}
 
 	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {

+ 11 - 1
drivers/gpu/drm/sun4i/sun4i_dotclock.c

@@ -81,9 +81,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
 	int i;
 
 	for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
-		unsigned long ideal = rate * i;
+		u64 ideal = (u64)rate * i;
 		unsigned long rounded;
 
+		/*
+		 * ideal has overflowed the max value that can be stored in an
+		 * unsigned long, and every clk operation we might do on a
+		 * truncated u64 value will give us incorrect results.
+		 * Let's just stop there since bigger dividers will result in
+		 * the same overflow issue.
+		 */
+		if (ideal > ULONG_MAX)
+			goto out;
+
 		rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
 					    ideal);
 

+ 1 - 0
drivers/input/mouse/elan_i2c_core.c

@@ -1346,6 +1346,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0611", 0 },
 	{ "ELAN0612", 0 },
 	{ "ELAN0618", 0 },
+	{ "ELAN061C", 0 },
 	{ "ELAN061D", 0 },
 	{ "ELAN0622", 0 },
 	{ "ELAN1000", 0 },

+ 1 - 1
drivers/nvme/host/core.c

@@ -3143,8 +3143,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	}
 
 	mutex_lock(&ns->ctrl->subsys->lock);
-	nvme_mpath_clear_current_path(ns);
 	list_del_rcu(&ns->siblings);
+	nvme_mpath_clear_current_path(ns);
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
 	down_write(&ns->ctrl->namespaces_rwsem);

+ 8 - 8
drivers/usb/class/cdc-acm.c

@@ -310,17 +310,17 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
 
 		if (difference & ACM_CTRL_DSR)
 			acm->iocount.dsr++;
-		if (difference & ACM_CTRL_BRK)
-			acm->iocount.brk++;
-		if (difference & ACM_CTRL_RI)
-			acm->iocount.rng++;
 		if (difference & ACM_CTRL_DCD)
 			acm->iocount.dcd++;
-		if (difference & ACM_CTRL_FRAMING)
+		if (newctrl & ACM_CTRL_BRK)
+			acm->iocount.brk++;
+		if (newctrl & ACM_CTRL_RI)
+			acm->iocount.rng++;
+		if (newctrl & ACM_CTRL_FRAMING)
 			acm->iocount.frame++;
-		if (difference & ACM_CTRL_PARITY)
+		if (newctrl & ACM_CTRL_PARITY)
 			acm->iocount.parity++;
-		if (difference & ACM_CTRL_OVERRUN)
+		if (newctrl & ACM_CTRL_OVERRUN)
 			acm->iocount.overrun++;
 		spin_unlock_irqrestore(&acm->read_lock, flags);
 
@@ -355,7 +355,6 @@ static void acm_ctrl_irq(struct urb *urb)
 	case -ENOENT:
 	case -ESHUTDOWN:
 		/* this urb is terminated, clean up */
-		acm->nb_index = 0;
 		dev_dbg(&acm->control->dev,
 			"%s - urb shutting down with status: %d\n",
 			__func__, status);
@@ -1642,6 +1641,7 @@ static int acm_pre_reset(struct usb_interface *intf)
 	struct acm *acm = usb_get_intfdata(intf);
 
 	clear_bit(EVENT_RX_STALL, &acm->flags);
+	acm->nb_index = 0; /* pending control transfers are lost */
 
 	return 0;
 }

+ 2 - 2
drivers/usb/core/devio.c

@@ -1474,8 +1474,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 	u = 0;
 	switch (uurb->type) {
 	case USBDEVFS_URB_TYPE_CONTROL:
-		if (is_in)
-			allow_short = true;
 		if (!usb_endpoint_xfer_control(&ep->desc))
 			return -EINVAL;
 		/* min 8 byte setup packet */
@@ -1505,6 +1503,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 			is_in = 0;
 			uurb->endpoint &= ~USB_DIR_IN;
 		}
+		if (is_in)
+			allow_short = true;
 		snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
 			"bRequest=%02x wValue=%04x "
 			"wIndex=%04x wLength=%04x\n",

+ 3 - 0
drivers/usb/gadget/function/f_mass_storage.c

@@ -221,6 +221,8 @@
 #include <linux/usb/gadget.h>
 #include <linux/usb/composite.h>
 
+#include <linux/nospec.h>
+
 #include "configfs.h"
 
 
@@ -3152,6 +3154,7 @@ static struct config_group *fsg_lun_make(struct config_group *group,
 	fsg_opts = to_fsg_opts(&group->cg_item);
 	if (num >= FSG_MAX_LUNS)
 		return ERR_PTR(-ERANGE);
+	num = array_index_nospec(num, FSG_MAX_LUNS);
 
 	mutex_lock(&fsg_opts->lock);
 	if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {

+ 4 - 2
drivers/usb/host/xhci-pci.c

@@ -179,10 +179,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+	    pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)
 		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
 		xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
-	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||

+ 2 - 0
drivers/usb/roles/intel-xhci-usb-role-switch.c

@@ -161,6 +161,8 @@ static int intel_xhci_usb_remove(struct platform_device *pdev)
 {
 	struct intel_xhci_usb_data *data = platform_get_drvdata(pdev);
 
+	pm_runtime_disable(&pdev->dev);
+
 	usb_role_switch_unregister(data->role_sw);
 	return 0;
 }

+ 42 - 15
drivers/usb/usbip/vhci_hcd.c

@@ -318,8 +318,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	struct vhci_hcd	*vhci_hcd;
 	struct vhci	*vhci;
 	int             retval = 0;
-	int		rhport;
+	int		rhport = -1;
 	unsigned long	flags;
+	bool invalid_rhport = false;
 
 	u32 prev_port_status[VHCI_HC_PORTS];
 
@@ -334,9 +335,19 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	usbip_dbg_vhci_rh("typeReq %x wValue %x wIndex %x\n", typeReq, wValue,
 			  wIndex);
 
-	if (wIndex > VHCI_HC_PORTS)
-		pr_err("invalid port number %d\n", wIndex);
-	rhport = wIndex - 1;
+	/*
+	 * wIndex can be 0 for some request types (typeReq). rhport is
+	 * in valid range when wIndex >= 1 and < VHCI_HC_PORTS.
+	 *
+	 * Reference port_status[] only with valid rhport when
+	 * invalid_rhport is false.
+	 */
+	if (wIndex < 1 || wIndex > VHCI_HC_PORTS) {
+		invalid_rhport = true;
+		if (wIndex > VHCI_HC_PORTS)
+			pr_err("invalid port number %d\n", wIndex);
+	} else
+		rhport = wIndex - 1;
 
 	vhci_hcd = hcd_to_vhci_hcd(hcd);
 	vhci = vhci_hcd->vhci;
@@ -345,8 +356,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 
 	/* store old status and compare now and old later */
 	if (usbip_dbg_flag_vhci_rh) {
-		memcpy(prev_port_status, vhci_hcd->port_status,
-			sizeof(prev_port_status));
+		if (!invalid_rhport)
+			memcpy(prev_port_status, vhci_hcd->port_status,
+				sizeof(prev_port_status));
 	}
 
 	switch (typeReq) {
@@ -354,8 +366,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		usbip_dbg_vhci_rh(" ClearHubFeature\n");
 		break;
 	case ClearPortFeature:
-		if (rhport < 0)
+		if (invalid_rhport) {
+			pr_err("invalid port number %d\n", wIndex);
 			goto error;
+		}
 		switch (wValue) {
 		case USB_PORT_FEAT_SUSPEND:
 			if (hcd->speed == HCD_USB3) {
@@ -415,9 +429,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		break;
 	case GetPortStatus:
 		usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
-		if (wIndex < 1) {
+		if (invalid_rhport) {
 			pr_err("invalid port number %d\n", wIndex);
 			retval = -EPIPE;
+			goto error;
 		}
 
 		/* we do not care about resume. */
@@ -513,16 +528,20 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 				goto error;
 			}
 
-			if (rhport < 0)
+			if (invalid_rhport) {
+				pr_err("invalid port number %d\n", wIndex);
 				goto error;
+			}
 
 			vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
 			break;
 		case USB_PORT_FEAT_POWER:
 			usbip_dbg_vhci_rh(
 				" SetPortFeature: USB_PORT_FEAT_POWER\n");
-			if (rhport < 0)
+			if (invalid_rhport) {
+				pr_err("invalid port number %d\n", wIndex);
 				goto error;
+			}
 			if (hcd->speed == HCD_USB3)
 				vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
 			else
@@ -531,8 +550,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		case USB_PORT_FEAT_BH_PORT_RESET:
 			usbip_dbg_vhci_rh(
 				" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
-			if (rhport < 0)
+			if (invalid_rhport) {
+				pr_err("invalid port number %d\n", wIndex);
 				goto error;
+			}
 			/* Applicable only for USB3.0 hub */
 			if (hcd->speed != HCD_USB3) {
 				pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -543,8 +564,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		case USB_PORT_FEAT_RESET:
 			usbip_dbg_vhci_rh(
 				" SetPortFeature: USB_PORT_FEAT_RESET\n");
-			if (rhport < 0)
+			if (invalid_rhport) {
+				pr_err("invalid port number %d\n", wIndex);
 				goto error;
+			}
 			/* if it's already enabled, disable */
 			if (hcd->speed == HCD_USB3) {
 				vhci_hcd->port_status[rhport] = 0;
@@ -565,8 +588,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		default:
 			usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
 					  wValue);
-			if (rhport < 0)
+			if (invalid_rhport) {
+				pr_err("invalid port number %d\n", wIndex);
 				goto error;
+			}
 			if (hcd->speed == HCD_USB3) {
 				if ((vhci_hcd->port_status[rhport] &
 				     USB_SS_PORT_STAT_POWER) != 0) {
@@ -608,7 +633,7 @@ error:
 	if (usbip_dbg_flag_vhci_rh) {
 		pr_debug("port %d\n", rhport);
 		/* Only dump valid port status */
-		if (rhport >= 0) {
+		if (!invalid_rhport) {
 			dump_port_status_diff(prev_port_status[rhport],
 					      vhci_hcd->port_status[rhport],
 					      hcd->speed == HCD_USB3);
@@ -618,8 +643,10 @@ error:
 
 	spin_unlock_irqrestore(&vhci->lock, flags);
 
-	if ((vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0)
+	if (!invalid_rhport &&
+	    (vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0) {
 		usb_hcd_poll_rh_status(hcd);
+	}
 
 	return retval;
 }

+ 11 - 0
include/drm/drm_atomic.h

@@ -153,6 +153,17 @@ struct __drm_planes_state {
 struct __drm_crtcs_state {
 	struct drm_crtc *ptr;
 	struct drm_crtc_state *state, *old_state, *new_state;
+
+	/**
+	 * @commit:
+	 *
+	 * A reference to the CRTC commit object that is kept for use by
+	 * drm_atomic_helper_wait_for_flip_done() after
+	 * drm_atomic_helper_commit_hw_done() is called. This ensures that a
+	 * concurrent commit won't free a commit object that is still in use.
+	 */
+	struct drm_crtc_commit *commit;
+
 	s32 __user *out_fence_ptr;
 	u64 last_vblank_count;
 };

+ 20 - 4
kernel/sched/fair.c

@@ -4001,7 +4001,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * put back on, and if we advance min_vruntime, we'll be placed back
 	 * further than we started -- ie. we'll be penalized.
 	 */
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
 		update_min_vruntime(cfs_rq);
 }
 
@@ -4476,9 +4476,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/*
 	 * Add to the _head_ of the list, so that an already-started
-	 * distribute_cfs_runtime will not see us
+	 * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is
+	 * not running add to the tail so that later runqueues don't get starved.
 	 */
-	list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	if (cfs_b->distribute_running)
+		list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	else
+		list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 
 	/*
 	 * If we're the first throttled task, make sure the bandwidth
@@ -4622,14 +4626,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	 * in us over-using our runtime if it is all used during this loop, but
 	 * only by limited amounts in that extreme case.
 	 */
-	while (throttled && cfs_b->runtime > 0) {
+	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 		runtime = cfs_b->runtime;
+		cfs_b->distribute_running = 1;
 		raw_spin_unlock(&cfs_b->lock);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
 		runtime = distribute_cfs_runtime(cfs_b, runtime,
 						 runtime_expires);
 		raw_spin_lock(&cfs_b->lock);
 
+		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 
 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -4740,6 +4746,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
 	/* confirm we're still not at a refresh boundary */
 	raw_spin_lock(&cfs_b->lock);
+	if (cfs_b->distribute_running) {
+		raw_spin_unlock(&cfs_b->lock);
+		return;
+	}
+
 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
 		raw_spin_unlock(&cfs_b->lock);
 		return;
@@ -4749,6 +4760,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 		runtime = cfs_b->runtime;
 
 	expires = cfs_b->runtime_expires;
+	if (runtime)
+		cfs_b->distribute_running = 1;
+
 	raw_spin_unlock(&cfs_b->lock);
 
 	if (!runtime)
@@ -4759,6 +4773,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 	raw_spin_lock(&cfs_b->lock);
 	if (expires == cfs_b->runtime_expires)
 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
+	cfs_b->distribute_running = 0;
 	raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -4867,6 +4882,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	cfs_b->period_timer.function = sched_cfs_period_timer;
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
+	cfs_b->distribute_running = 0;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)

+ 2 - 0
kernel/sched/sched.h

@@ -346,6 +346,8 @@ struct cfs_bandwidth {
 	int			nr_periods;
 	int			nr_throttled;
 	u64			throttled_time;
+
+	bool                    distribute_running;
 #endif
 };
 
 

+ 25 - 7
kernel/trace/trace_events_hist.c

@@ -738,16 +738,30 @@ static void free_synth_field(struct synth_field *field)
 	kfree(field);
 }
 
-static struct synth_field *parse_synth_field(char *field_type,
-					     char *field_name)
+static struct synth_field *parse_synth_field(int argc, char **argv,
+					     int *consumed)
 {
 	struct synth_field *field;
+	const char *prefix = NULL;
+	char *field_type = argv[0], *field_name;
 	int len, ret = 0;
 	char *array;
 
 	if (field_type[0] == ';')
 		field_type++;
 
+	if (!strcmp(field_type, "unsigned")) {
+		if (argc < 3)
+			return ERR_PTR(-EINVAL);
+		prefix = "unsigned ";
+		field_type = argv[1];
+		field_name = argv[2];
+		*consumed = 3;
+	} else {
+		field_name = argv[1];
+		*consumed = 2;
+	}
+
 	len = strlen(field_name);
 	if (field_name[len - 1] == ';')
 		field_name[len - 1] = '\0';
@@ -760,11 +774,15 @@ static struct synth_field *parse_synth_field(char *field_type,
 	array = strchr(field_name, '[');
 	if (array)
 		len += strlen(array);
+	if (prefix)
+		len += strlen(prefix);
 	field->type = kzalloc(len, GFP_KERNEL);
 	if (!field->type) {
 		ret = -ENOMEM;
 		goto free;
 	}
+	if (prefix)
+		strcat(field->type, prefix);
 	strcat(field->type, field_type);
 	if (array) {
 		strcat(field->type, array);
@@ -1009,7 +1027,7 @@ static int create_synth_event(int argc, char **argv)
 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
 	struct synth_event *event = NULL;
 	bool delete_event = false;
-	int i, n_fields = 0, ret = 0;
+	int i, consumed = 0, n_fields = 0, ret = 0;
 	char *name;
 
 	mutex_lock(&synth_event_mutex);
@@ -1061,16 +1079,16 @@ static int create_synth_event(int argc, char **argv)
 			goto err;
 		}
 
-		field = parse_synth_field(argv[i], argv[i + 1]);
+		field = parse_synth_field(argc - i, &argv[i], &consumed);
 		if (IS_ERR(field)) {
 			ret = PTR_ERR(field);
 			goto err;
 		}
-		fields[n_fields] = field;
-		i++; n_fields++;
+		fields[n_fields++] = field;
+		i += consumed - 1;
 	}
 
-	if (i < argc) {
+	if (i < argc && strcmp(argv[i], ";") != 0) {
 		ret = -EINVAL;
 		goto err;
 	}

+ 1 - 2
net/core/neighbour.c

@@ -1167,8 +1167,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		neigh->nud_state = new;
 		err = 0;
 		notify = old & NUD_VALID;
-		if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
-		     (flags & NEIGH_UPDATE_F_ADMIN)) &&
+		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
 		    (new & NUD_FAILED)) {
 			neigh_invalidate(neigh);
 			notify = 1;

+ 3 - 2
net/core/skbuff.c

@@ -1846,8 +1846,9 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		int delta = skb->len - len;
 
-		skb->csum = csum_sub(skb->csum,
-				     skb_checksum(skb, len, delta, 0));
+		skb->csum = csum_block_sub(skb->csum,
+					   skb_checksum(skb, len, delta, 0),
+					   len);
 	}
 	return __pskb_trim(skb, len);
 }

+ 4 - 2
net/ipv6/addrconf.c

@@ -4972,12 +4972,14 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
 
 		/* unicast address incl. temp addr */
 		list_for_each_entry(ifa, &idev->addr_list, if_list) {
-			if (++ip_idx < s_ip_idx)
-				continue;
+			if (ip_idx < s_ip_idx)
+				goto next;
 			err = inet6_fill_ifaddr(skb, ifa, fillargs);
 			if (err < 0)
 				break;
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+next:
+			ip_idx++;
 		}
 		break;
 	}

+ 1 - 0
tools/arch/x86/include/uapi/asm/kvm.h

@@ -377,6 +377,7 @@ struct kvm_sync_regs {
 
 #define KVM_X86_QUIRK_LINT0_REENABLED	(1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED	(1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE	(1 << 2)
 
 #define KVM_STATE_NESTED_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING	0x00000002

+ 1 - 0
tools/include/uapi/linux/kvm.h

@@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_HPAGE_1M 156
 #define KVM_CAP_NESTED_STATE 157
 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
 
 #ifdef KVM_CAP_IRQ_ROUTING
 

+ 2 - 2
tools/lib/api/fs/tracing_path.c

@@ -36,7 +36,7 @@ static const char *tracing_path_tracefs_mount(void)
 
 	__tracing_path_set("", mnt);
 
-	return mnt;
+	return tracing_path;
 }
 
 static const char *tracing_path_debugfs_mount(void)
@@ -49,7 +49,7 @@ static const char *tracing_path_debugfs_mount(void)
 
 	__tracing_path_set("tracing/", mnt);
 
-	return mnt;
+	return tracing_path;
 }
 
 const char *tracing_path_mount(void)

+ 1 - 1
tools/perf/Makefile.config

@@ -833,7 +833,7 @@ ifndef NO_JVMTI
     JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
   else
     ifneq (,$(wildcard /usr/sbin/alternatives))
-      JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
     endif
   endif
   ifndef JDIR

+ 1 - 1
tools/perf/Makefile.perf

@@ -635,7 +635,7 @@ $(LIBPERF_IN): prepare FORCE
 $(LIB_FILE): $(LIBPERF_IN)
 	$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
 
-LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
+LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
 
 $(LIBTRACEEVENT): FORCE
 	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a

+ 1 - 0
tools/perf/builtin-report.c

@@ -981,6 +981,7 @@ int cmd_report(int argc, const char **argv)
 			.id_index	 = perf_event__process_id_index,
 			.auxtrace_info	 = perf_event__process_auxtrace_info,
 			.auxtrace	 = perf_event__process_auxtrace,
+			.event_update	 = perf_event__process_event_update,
 			.feature	 = process_feature_event,
 			.ordered_events	 = true,
 			.ordering_requires_timestamps = true,

+ 8 - 8
tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json

@@ -188,7 +188,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xb",
         "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
-        "Filter": "filter_band0=1200",
+        "Filter": "filter_band0=12",
         "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_1200mhz_cycles %",
         "PerPkg": "1",
@@ -199,7 +199,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xc",
         "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
-        "Filter": "filter_band1=2000",
+        "Filter": "filter_band1=20",
         "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_2000mhz_cycles %",
         "PerPkg": "1",
@@ -210,7 +210,7 @@
         "Counter": "0,1,2,3",
        "EventCode": "0xd",
         "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
-        "Filter": "filter_band2=3000",
+        "Filter": "filter_band2=30",
         "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_3000mhz_cycles %",
         "PerPkg": "1",
@@ -221,7 +221,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xe",
         "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
-        "Filter": "filter_band3=4000",
+        "Filter": "filter_band3=40",
         "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_4000mhz_cycles %",
         "PerPkg": "1",
@@ -232,7 +232,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xb",
         "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
-        "Filter": "edge=1,filter_band0=1200",
+        "Filter": "edge=1,filter_band0=12",
         "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_1200mhz_cycles %",
         "PerPkg": "1",
@@ -243,7 +243,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xc",
         "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
-        "Filter": "edge=1,filter_band1=2000",
+        "Filter": "edge=1,filter_band1=20",
         "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_2000mhz_cycles %",
         "PerPkg": "1",
@@ -254,7 +254,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xd",
         "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
-        "Filter": "edge=1,filter_band2=4000",
+        "Filter": "edge=1,filter_band2=30",
         "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_3000mhz_cycles %",
         "PerPkg": "1",
@@ -265,7 +265,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xe",
         "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
-        "Filter": "edge=1,filter_band3=4000",
+        "Filter": "edge=1,filter_band3=40",
         "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_4000mhz_cycles %",
         "PerPkg": "1",

+ 8 - 8
tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json

@@ -187,7 +187,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xb",
         "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
-        "Filter": "filter_band0=1200",
+        "Filter": "filter_band0=12",
         "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_1200mhz_cycles %",
         "PerPkg": "1",
@@ -198,7 +198,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xc",
         "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
-        "Filter": "filter_band1=2000",
+        "Filter": "filter_band1=20",
         "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_2000mhz_cycles %",
         "PerPkg": "1",
@@ -209,7 +209,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xd",
         "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
-        "Filter": "filter_band2=3000",
+        "Filter": "filter_band2=30",
         "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_3000mhz_cycles %",
         "PerPkg": "1",
@@ -220,7 +220,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xe",
         "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
-        "Filter": "filter_band3=4000",
+        "Filter": "filter_band3=40",
         "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_4000mhz_cycles %",
         "PerPkg": "1",
@@ -231,7 +231,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xb",
         "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
-        "Filter": "edge=1,filter_band0=1200",
+        "Filter": "edge=1,filter_band0=12",
         "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_1200mhz_cycles %",
         "PerPkg": "1",
@@ -242,7 +242,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xc",
         "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
-        "Filter": "edge=1,filter_band1=2000",
+        "Filter": "edge=1,filter_band1=20",
         "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_2000mhz_cycles %",
         "PerPkg": "1",
@@ -253,7 +253,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xd",
         "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
-        "Filter": "edge=1,filter_band2=4000",
+        "Filter": "edge=1,filter_band2=30",
         "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_3000mhz_cycles %",
         "PerPkg": "1",
@@ -264,7 +264,7 @@
         "Counter": "0,1,2,3",
         "EventCode": "0xe",
         "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
-        "Filter": "edge=1,filter_band3=4000",
+        "Filter": "edge=1,filter_band3=40",
         "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
         "MetricName": "freq_ge_4000mhz_cycles %",
         "PerPkg": "1",

+ 3 - 19
tools/perf/util/event.c

@@ -1081,6 +1081,7 @@ void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max
 	}
 
 	*size += sizeof(struct cpu_map_data);
+	*size = PERF_ALIGN(*size, sizeof(u64));
 	return zalloc(*size);
 }
 
@@ -1560,26 +1561,9 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
 
 		return NULL;
 	}
-try_again:
+
 	al->map = map_groups__find(mg, al->addr);
-	if (al->map == NULL) {
-		/*
-		 * If this is outside of all known maps, and is a negative
-		 * address, try to look it up in the kernel dso, as it might be
-		 * a vsyscall or vdso (which executes in user-mode).
-		 *
-		 * XXX This is nasty, we should have a symbol list in the
-		 * "[vdso]" dso, but for now lets use the old trick of looking
-		 * in the whole kernel symbol list.
-		 */
-		if (cpumode == PERF_RECORD_MISC_USER && machine &&
-		    mg != &machine->kmaps &&
-		    machine__kernel_ip(machine, al->addr)) {
-			mg = &machine->kmaps;
-			load_map = true;
-			goto try_again;
-		}
-	} else {
+	if (al->map != NULL) {
 		/*
 		 * Kernel maps might be changed when loading symbols so loading
 		 * must be done prior to using kernel maps.

+ 3 - 0
tools/perf/util/evsel.c

@@ -1089,6 +1089,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 		attr->exclude_user   = 1;
 	}
 
+	if (evsel->own_cpus)
+		evsel->attr.read_format |= PERF_FORMAT_ID;
+
 	/*
 	 * Apply event specific term settings,
 	 * it overloads any global configuration.

+ 7 - 6
tools/perf/util/pmu.c

@@ -930,13 +930,14 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
 
 static __u64 pmu_format_max_value(const unsigned long *format)
 {
-	__u64 w = 0;
-	int fbit;
-
-	for_each_set_bit(fbit, format, PERF_PMU_FORMAT_BITS)
-		w |= (1ULL << fbit);
+	int w;
 
-	return w;
+	w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
+	if (!w)
+		return 0;
+	if (w < 64)
+		return (1ULL << w) - 1;
+	return -1;
 }
 
 /*

+ 3 - 0
tools/perf/util/srcline.c

@@ -85,6 +85,9 @@ static struct symbol *new_inline_sym(struct dso *dso,
 	struct symbol *inline_sym;
 	char *demangled = NULL;
 
+	if (!funcname)
+		funcname = "??";
+
 	if (dso) {
 		demangled = dso__demangle_sym(dso, 0, funcname);
 		if (demangled)

+ 4 - 0
tools/testing/selftests/drivers/usb/usbip/usbip_test.sh

@@ -141,6 +141,10 @@ echo "Import devices from localhost - should work"
 src/usbip attach -r localhost -b $busid;
 src/usbip attach -r localhost -b $busid;
 echo "=============================================================="
 echo "=============================================================="
 
 
+# Wait for sysfs file to be updated. Without this sleep, usbip port
+# shows no imported devices.
+sleep 3;
+
 echo "List imported devices - expect to see imported devices";
 echo "List imported devices - expect to see imported devices";
 src/usbip port;
 src/usbip port;
 echo "=============================================================="
 echo "=============================================================="

+ 80 - 0
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc

@@ -0,0 +1,80 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test synthetic_events syntax parser
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit_fail
+}
+
+if [ ! -f set_event ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f synthetic_events ]; then
+    echo "synthetic event is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+echo "Test synthetic_events syntax parser"
+
+echo > synthetic_events
+
+# synthetic event must have a field
+! echo "myevent" >> synthetic_events
+echo "myevent u64 var1" >> synthetic_events
+
+# synthetic event must be found in synthetic_events
+grep "myevent[[:space:]]u64 var1" synthetic_events
+
+# it is not possible to add same name event
+! echo "myevent u64 var2" >> synthetic_events
+
+# Non-append open will cleanup all events and add new one
+echo "myevent u64 var2" > synthetic_events
+
+# multiple fields with different spaces
+echo "myevent u64 var1; u64 var2;" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+echo "myevent u64 var1 ; u64 var2 ;" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+echo "myevent u64 var1 ;u64 var2" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+
+# test field types
+echo "myevent u32 var" > synthetic_events
+echo "myevent u16 var" > synthetic_events
+echo "myevent u8 var" > synthetic_events
+echo "myevent s64 var" > synthetic_events
+echo "myevent s32 var" > synthetic_events
+echo "myevent s16 var" > synthetic_events
+echo "myevent s8 var" > synthetic_events
+
+echo "myevent char var" > synthetic_events
+echo "myevent int var" > synthetic_events
+echo "myevent long var" > synthetic_events
+echo "myevent pid_t var" > synthetic_events
+
+echo "myevent unsigned char var" > synthetic_events
+echo "myevent unsigned int var" > synthetic_events
+echo "myevent unsigned long var" > synthetic_events
+grep "myevent[[:space:]]unsigned long var" synthetic_events
+
+# test string type
+echo "myevent char var[10]" > synthetic_events
+grep "myevent[[:space:]]char\[10\] var" synthetic_events
+
+do_reset
+
+exit 0