@@ -61,66 +61,7 @@
 
 #include <asm/octeon/cvmx-gmxx-defs.h>
 
-struct cvm_napi_wrapper {
-	struct napi_struct napi;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
-
-struct cvm_oct_core_state {
-	int baseline_cores;
-	/*
-	 * The number of additional cores that could be processing
-	 * input packets.
-	 */
-	atomic_t available_cores;
-	cpumask_t cpu_state;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
-
-static int cvm_irq_cpu;
-
-static void cvm_oct_enable_napi(void *_)
-{
-	int cpu = smp_processor_id();
-	napi_schedule(&cvm_oct_napi[cpu].napi);
-}
-
-static void cvm_oct_enable_one_cpu(void)
-{
-	int v;
-	int cpu;
-
-	/* Check to see if more CPUs are available for receive processing... */
-	v = atomic_sub_if_positive(1, &core_state.available_cores);
-	if (v < 0)
-		return;
-
-	/* ... if a CPU is available, Turn on NAPI polling for that CPU. */
-	for_each_online_cpu(cpu) {
-		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
-			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
-						     NULL, 0);
-			if (v)
-				panic("Can't enable NAPI.");
-			break;
-		}
-	}
-}
-
-static void cvm_oct_no_more_work(void)
-{
-	int cpu = smp_processor_id();
-
-	if (cpu == cvm_irq_cpu) {
-		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
-		return;
-	}
-
-	cpu_clear(cpu, core_state.cpu_state);
-	atomic_add(1, &core_state.available_cores);
-}
+static struct napi_struct cvm_oct_napi;
 
 /**
  * cvm_oct_do_interrupt - interrupt handler.
@@ -132,8 +73,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
 {
 	/* Disable the IRQ and start napi_poll. */
 	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
-	cvm_irq_cpu = smp_processor_id();
-	cvm_oct_enable_napi(NULL);
+	napi_schedule(&cvm_oct_napi);
 
 	return IRQ_HANDLED;
 }
@@ -285,23 +225,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
 			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
 			did_work_request = 1;
 		}
-
-#ifndef CONFIG_RPS
-		if (rx_count == 0) {
-			/*
-			 * First time through, see if there is enough
-			 * work waiting to merit waking another
-			 * CPU.
-			 */
-			union cvmx_pow_wq_int_cntx counts;
-			int backlog;
-			int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
-			counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
-			backlog = counts.s.iq_cnt + counts.s.ds_cnt;
-			if (backlog > budget * cores_in_use && napi != NULL)
-				cvm_oct_enable_one_cpu();
-		}
-#endif
 		rx_count++;
 
 		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
@@ -478,7 +401,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
 	if (rx_count < budget && napi != NULL) {
 		/* No more work */
 		napi_complete(napi);
-		cvm_oct_no_more_work();
+		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
 	}
 	return rx_count;
 }
@@ -513,18 +436,10 @@ void cvm_oct_rx_initialize(void)
 	if (NULL == dev_for_napi)
 		panic("No net_devices were allocated.");
 
-	if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
-		atomic_set(&core_state.available_cores, max_rx_cpus);
-	else
-		atomic_set(&core_state.available_cores, num_online_cpus());
-	core_state.baseline_cores = atomic_read(&core_state.available_cores);
-
-	core_state.cpu_state = CPU_MASK_NONE;
-	for_each_possible_cpu(i) {
-		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
-			       cvm_oct_napi_poll, rx_napi_weight);
-		napi_enable(&cvm_oct_napi[i].napi);
-	}
+	netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
+		       rx_napi_weight);
+	napi_enable(&cvm_oct_napi);
+
 	/* Register an IRQ handler to receive POW interrupts */
 	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
 			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
@@ -545,15 +460,11 @@ void cvm_oct_rx_initialize(void)
 	int_pc.s.pc_thr = 5;
 	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
 
-
-	/* Scheduld NAPI now. This will indirectly enable interrupts. */
-	cvm_oct_enable_one_cpu();
+	/* Schedule NAPI now. This will indirectly enable the interrupt. */
+	napi_schedule(&cvm_oct_napi);
 }
 
 void cvm_oct_rx_shutdown(void)
 {
-	int i;
-	/* Shutdown all of the NAPIs */
-	for_each_possible_cpu(i)
-		netif_napi_del(&cvm_oct_napi[i].napi);
+	netif_napi_del(&cvm_oct_napi);
 }