@@ -24,6 +24,7 @@
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/capability.h>
+#include <trace/events/xdp.h>
 
 #include <linux/netdevice.h>   /* netif_receive_skb_core */
 #include <linux/etherdevice.h> /* eth_type_trans */
@@ -43,6 +44,8 @@ struct xdp_bulk_queue {
 
 /* Struct for every remote "destination" CPU in map */
 struct bpf_cpu_map_entry {
+	u32 cpu;    /* kthread CPU and map index */
+	int map_id; /* Back reference to map */
 	u32 qsize;  /* Queue size placeholder for map lookup */
 
 	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
@@ -280,15 +283,16 @@ static int cpu_map_kthread_run(void *data)
 	 * kthread_stop signal until queue is empty.
 	 */
 	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
-		unsigned int processed = 0, drops = 0;
+		unsigned int processed = 0, drops = 0, sched = 0;
 		struct xdp_pkt *xdp_pkt;
 
 		/* Release CPU reschedule checks */
 		if (__ptr_ring_empty(rcpu->queue)) {
 			__set_current_state(TASK_INTERRUPTIBLE);
 			schedule();
+			sched = 1;
 		} else {
-			cond_resched();
+			sched = cond_resched();
 		}
 		__set_current_state(TASK_RUNNING);
 
@@ -318,6 +322,9 @@ static int cpu_map_kthread_run(void *data)
 			if (++processed == 8)
 				break;
 		}
+		/* Feedback loop via tracepoint */
+		trace_xdp_cpumap_kthread(rcpu->map_id, processed, drops, sched);
+
 		local_bh_enable(); /* resched point, may call do_softirq() */
 	}
 	__set_current_state(TASK_RUNNING);
@@ -354,7 +361,9 @@ struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id)
 	if (err)
 		goto free_queue;
 
-	rcpu->qsize = qsize;
+	rcpu->cpu    = cpu;
+	rcpu->map_id = map_id;
+	rcpu->qsize  = qsize;
 
 	/* Setup kthread */
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
@@ -584,6 +593,8 @@ const struct bpf_map_ops cpu_map_ops = {
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
 			     struct xdp_bulk_queue *bq)
 {
+	unsigned int processed = 0, drops = 0;
+	const int to_cpu = rcpu->cpu;
 	struct ptr_ring *q;
 	int i;
 
@@ -599,13 +610,16 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
 
 		err = __ptr_ring_produce(q, xdp_pkt);
 		if (err) {
-			/* Free xdp_pkt */
-			page_frag_free(xdp_pkt);
+			drops++;
+			page_frag_free(xdp_pkt); /* Free xdp_pkt */
 		}
+		processed++;
 	}
 	bq->count = 0;
 	spin_unlock(&q->producer_lock);
 
+	/* Feedback loop via tracepoints */
+	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
 	return 0;
 }
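
For reference, the two tracepoints called above must be declared in include/trace/events/xdp.h, which the new #include pulls in. Below is a minimal sketch of what such a declaration looks like for the kthread event; the field layout and format string are illustrative assumptions, not the verbatim upstream definition.

/* Illustrative sketch only: the real declaration lives in
 * include/trace/events/xdp.h and may carry additional fields.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xdp

#include <linux/tracepoint.h>

TRACE_EVENT(xdp_cpumap_kthread,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int sched),

	TP_ARGS(map_id, processed, drops, sched),

	TP_STRUCT__entry(
		__field(int,		map_id)
		__field(unsigned int,	processed)
		__field(unsigned int,	drops)
		__field(int,		sched)
	),

	TP_fast_assign(
		__entry->map_id		= map_id;
		__entry->processed	= processed;
		__entry->drops		= drops;
		__entry->sched		= sched;
	),

	TP_printk("map_id=%d processed=%u drops=%u sched=%d",
		  __entry->map_id, __entry->processed,
		  __entry->drops, __entry->sched)
);

The enqueue event would be declared analogously, with a to_cpu field in place of sched. Since TRACE_SYSTEM is xdp, both events show up under the xdp: category and can be recorded with standard tooling, e.g. perf record -e xdp:xdp_cpumap_kthread -e xdp:xdp_cpumap_enqueue, which is the "feedback loop" the patch comments refer to: userspace can watch the processed/drops counters to tune queue sizes and CPU assignments.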