
bpf: minor cleanups on fd maps and helpers

Some minor cleanups: i) Remove the unlikely() from fd array map lookups
and let the CPU's branch predictor do its job; scenarios where a map entry
is not always present are perfectly valid. ii) Move the attribute type
check in the bpf_perf_event_read() helper a bit earlier so it is consistent
with the checks in the bpf_perf_event_output() helper. iii) Remove some
comments that are self-documenting in kprobe_prog_is_valid_access() and
thereby make it consistent with tp_prog_is_valid_access() as well.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Daniel Borkmann, 9 years ago
commit 1ca1cc98bf
2 changed files with 7 additions and 14 deletions:
  1. + 1 - 2  kernel/bpf/core.c
  2. + 6 - 12 kernel/trace/bpf_trace.c

+ 1 - 2
kernel/bpf/core.c

@@ -719,14 +719,13 @@ select_insn:
 
 		if (unlikely(index >= array->map.max_entries))
 			goto out;
-
 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
 			goto out;
 
 		tail_call_cnt++;
 
 		prog = READ_ONCE(array->ptrs[index]);
-		if (unlikely(!prog))
+		if (!prog)
 			goto out;
 
 		/* ARG1 at this point is guaranteed to point to CTX from
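
For context on point i) above: the kernel's unlikely() macro (in
include/linux/compiler.h) expands to __builtin_expect(!!(x), 0), which only
steers the compiler's static branch layout; it cannot help when the annotated
condition actually fires regularly, as with empty fd array map slots. Below is
a minimal user-space sketch, with a made-up prog array standing in for the
real map:

/* User-space sketch only: the struct, slots array and lookup() are made up
 * for illustration; in the kernel, unlikely() lives in
 * include/linux/compiler.h and is built on __builtin_expect(). */
#include <stdio.h>
#include <stddef.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

struct prog { int id; };

static struct prog *lookup(struct prog **slots, size_t n, size_t idx)
{
	if (unlikely(idx >= n))	/* genuinely rare: the hint is fine here */
		return NULL;

	/* No hint on the NULL check: an empty slot is a normal outcome, so
	 * let the CPU's branch predictor learn the real distribution rather
	 * than pushing this path out of the fall-through at compile time. */
	if (!slots[idx])
		return NULL;

	return slots[idx];
}

int main(void)
{
	struct prog p = { .id = 1 };
	struct prog *slots[4] = { [1] = &p };	/* holes are perfectly valid */

	printf("%p %p\n", (void *)lookup(slots, 4, 0),
			  (void *)lookup(slots, 4, 1));
	return 0;
}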

+ 6 - 12
kernel/trace/bpf_trace.c

@@ -199,19 +199,19 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
 		return -E2BIG;
 
 	ee = READ_ONCE(array->ptrs[index]);
-	if (unlikely(!ee))
+	if (!ee)
 		return -ENOENT;
 
 	event = ee->event;
+	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
+		     event->attr.type != PERF_TYPE_RAW))
+		return -EINVAL;
+
 	/* make sure event is local and doesn't have pmu::count */
 	if (event->oncpu != smp_processor_id() ||
 	    event->pmu->count)
 		return -EINVAL;
 
-	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
-		     event->attr.type != PERF_TYPE_RAW))
-		return -EINVAL;
-
 	/*
 	 * we don't know if the function is run successfully by the
 	 * return value. It can be judged in other places, such as
@@ -251,7 +251,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 		return -E2BIG;
 
 	ee = READ_ONCE(array->ptrs[index]);
-	if (unlikely(!ee))
+	if (!ee)
 		return -ENOENT;
 
 	event = ee->event;
@@ -354,18 +354,12 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 					enum bpf_reg_type *reg_type)
 {
-	/* check bounds */
 	if (off < 0 || off >= sizeof(struct pt_regs))
 		return false;
-
-	/* only read is allowed */
 	if (type != BPF_READ)
 		return false;
-
-	/* disallow misaligned access */
 	if (off % size != 0)
 		return false;
-
 	return true;
 }
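
For context on point ii) above: after the move, bpf_perf_event_read()
validates the static event type first and only then the runtime constraints,
matching the check order already used in bpf_perf_event_output(). A condensed
sketch of that shared ordering, using stand-in types rather than the kernel's
perf structures:

/* Stand-in sketch of the post-patch check order in bpf_perf_event_read();
 * the enum and struct below are illustrative, not the kernel's perf types. */
#include <errno.h>
#include <stdbool.h>

enum ev_type { EV_HARDWARE, EV_RAW, EV_SOFTWARE };

struct event {
	enum ev_type type;	/* stands in for event->attr.type */
	int oncpu;		/* CPU the event is active on */
	bool has_pmu_count;	/* stands in for event->pmu->count */
};

static int validate_for_read(const struct event *ev, int this_cpu)
{
	/* 1) static type check first, as bpf_perf_event_output() does it */
	if (ev->type != EV_HARDWARE && ev->type != EV_RAW)
		return -EINVAL;

	/* 2) then runtime checks: local to this CPU, no custom pmu::count */
	if (ev->oncpu != this_cpu || ev->has_pmu_count)
		return -EINVAL;

	return 0;
}

int main(void)
{
	struct event ev = { .type = EV_RAW, .oncpu = 0, .has_pmu_count = false };
	return validate_for_read(&ev, 0);	/* returns 0 on success */
}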