@@ -67,9 +67,6 @@ static DEFINE_MUTEX(fcoe_config_mutex);
 
 static struct workqueue_struct *fcoe_wq;
 
-/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
-static DECLARE_COMPLETION(fcoe_flush_completion);
-
 /* fcoe host list */
 /* must only by accessed under the RTNL mutex */
 static LIST_HEAD(fcoe_hostlist);
@@ -80,7 +77,6 @@ static int fcoe_reset(struct Scsi_Host *);
 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
 static int fcoe_rcv(struct sk_buff *, struct net_device *,
		    struct packet_type *, struct net_device *);
-static int fcoe_percpu_receive_thread(void *);
 static void fcoe_percpu_clean(struct fc_lport *);
 static int fcoe_link_ok(struct fc_lport *);
@@ -107,7 +103,6 @@ static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
 static int fcoe_ddp_done(struct fc_lport *, u16);
 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
			   unsigned int);
-static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
 static int fcoe_dcb_app_notification(struct notifier_block *notifier,
				     ulong event, void *ptr);
@@ -136,11 +131,6 @@ static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
 };
 
-/* notification function for CPU hotplug events */
-static struct notifier_block fcoe_cpu_notifier = {
-	.notifier_call = fcoe_cpu_callback,
-};
-
 /* notification function for DCB events */
 static struct notifier_block dcb_notifier = {
	.notifier_call = fcoe_dcb_app_notification,
@@ -1245,152 +1235,21 @@ static int __exit fcoe_if_exit(void)
	return 0;
 }
 
-/**
- * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
- * @cpu: The CPU index of the CPU to create a receive thread for
- */
-static void fcoe_percpu_thread_create(unsigned int cpu)
+static void fcoe_thread_cleanup_local(unsigned int cpu)
 {
-	struct fcoe_percpu_s *p;
-	struct task_struct *thread;
-
-	p = &per_cpu(fcoe_percpu, cpu);
-
-	thread = kthread_create_on_node(fcoe_percpu_receive_thread,
-					(void *)p, cpu_to_node(cpu),
-					"fcoethread/%d", cpu);
-
-	if (likely(!IS_ERR(thread))) {
-		kthread_bind(thread, cpu);
-		wake_up_process(thread);
-
-		spin_lock_bh(&p->fcoe_rx_list.lock);
-		p->thread = thread;
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-	}
-}
-
-/**
- * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
- * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
- *
- * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
- * current CPU's Rx thread. If the thread being destroyed is bound to
- * the CPU processing this context the skbs will be freed.
- */
-static void fcoe_percpu_thread_destroy(unsigned int cpu)
-{
-	struct fcoe_percpu_s *p;
-	struct task_struct *thread;
	struct page *crc_eof;
-	struct sk_buff *skb;
-#ifdef CONFIG_SMP
-	struct fcoe_percpu_s *p0;
-	unsigned targ_cpu = get_cpu();
-#endif /* CONFIG_SMP */
-
-	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+	struct fcoe_percpu_s *p;
 
-	/* Prevent any new skbs from being queued for this CPU. */
-	p = &per_cpu(fcoe_percpu, cpu);
+	p = per_cpu_ptr(&fcoe_percpu, cpu);
	spin_lock_bh(&p->fcoe_rx_list.lock);
-	thread = p->thread;
-	p->thread = NULL;
	crc_eof = p->crc_eof_page;
	p->crc_eof_page = NULL;
	p->crc_eof_offset = 0;
	spin_unlock_bh(&p->fcoe_rx_list.lock);
 
-#ifdef CONFIG_SMP
-	/*
-	 * Don't bother moving the skb's if this context is running
-	 * on the same CPU that is having its thread destroyed. This
-	 * can easily happen when the module is removed.
-	 */
-	if (cpu != targ_cpu) {
-		p0 = &per_cpu(fcoe_percpu, targ_cpu);
-		spin_lock_bh(&p0->fcoe_rx_list.lock);
-		if (p0->thread) {
-			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
-				 cpu, targ_cpu);
-
-			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-				__skb_queue_tail(&p0->fcoe_rx_list, skb);
-			spin_unlock_bh(&p0->fcoe_rx_list.lock);
-		} else {
-			/*
-			 * The targeted CPU is not initialized and cannot accept
-			 * new skbs. Unlock the targeted CPU and drop the skbs
-			 * on the CPU that is going offline.
-			 */
-			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-				kfree_skb(skb);
-			spin_unlock_bh(&p0->fcoe_rx_list.lock);
-		}
-	} else {
-		/*
-		 * This scenario occurs when the module is being removed
-		 * and all threads are being destroyed. skbs will continue
-		 * to be shifted from the CPU thread that is being removed
-		 * to the CPU thread associated with the CPU that is processing
-		 * the module removal. Once there is only one CPU Rx thread it
-		 * will reach this case and we will drop all skbs and later
-		 * stop the thread.
-		 */
-		spin_lock_bh(&p->fcoe_rx_list.lock);
-		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-			kfree_skb(skb);
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
-	}
-	put_cpu();
-#else
-	/*
-	 * This a non-SMP scenario where the singular Rx thread is
-	 * being removed. Free all skbs and stop the thread.
-	 */
-	spin_lock_bh(&p->fcoe_rx_list.lock);
-	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
-		kfree_skb(skb);
-	spin_unlock_bh(&p->fcoe_rx_list.lock);
-#endif
-
-	if (thread)
-		kthread_stop(thread);
-
	if (crc_eof)
		put_page(crc_eof);
-}
-
-/**
- * fcoe_cpu_callback() - Handler for CPU hotplug events
- * @nfb: The callback data block
- * @action: The event triggering the callback
- * @hcpu: The index of the CPU that the event is for
- *
- * This creates or destroys per-CPU data for fcoe
- *
- * Returns NOTIFY_OK always.
- */
-static int fcoe_cpu_callback(struct notifier_block *nfb,
-			     unsigned long action, void *hcpu)
-{
-	unsigned cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
-		fcoe_percpu_thread_create(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
-		fcoe_percpu_thread_destroy(cpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
+	flush_work(&p->work);
 }
 
 /**
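Note for readers: the hunks below rely on struct fcoe_percpu_s gaining a struct work_struct member named work; that change lives in drivers/scsi/fcoe/fcoe.h and is not visible in this diff. A minimal, hedged sketch of the assumed per-CPU layout and its initialization follows; example_init_percpu() and example_receive_work() are illustrative names, everything else mirrors what the hunks reference.

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/skbuff.h>
	#include <linux/workqueue.h>

	/* Sketch of the per-CPU receive context; the real definition is in fcoe.h. */
	struct fcoe_percpu_s {
		struct sk_buff_head fcoe_rx_list;	/* skbs queued by fcoe_rcv() */
		struct page *crc_eof_page;		/* cached page for CRC/EOF trailers */
		int crc_eof_offset;
		struct work_struct work;		/* new: replaces the per-CPU Rx kthread */
	};

	static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);

	/* Stand-in handler so the sketch is self-contained; the real driver passes
	 * fcoe_receive_work(), shown in a later hunk.
	 */
	static void example_receive_work(struct work_struct *work)
	{
		struct fcoe_percpu_s *p = container_of(work, struct fcoe_percpu_s, work);

		skb_queue_purge(&p->fcoe_rx_list);
	}

	/* Illustrative init: every possible CPU gets an initialized work item and an
	 * empty rx list, matching the fcoe_init() hunk further down.
	 */
	static void example_init_percpu(void)
	{
		unsigned int cpu;
		struct fcoe_percpu_s *p;

		for_each_possible_cpu(cpu) {
			p = per_cpu_ptr(&fcoe_percpu, cpu);
			INIT_WORK(&p->work, example_receive_work);
			skb_queue_head_init(&p->fcoe_rx_list);
		}
	}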
@@ -1509,26 +1368,6 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 
	fps = &per_cpu(fcoe_percpu, cpu);
	spin_lock(&fps->fcoe_rx_list.lock);
-	if (unlikely(!fps->thread)) {
-		/*
-		 * The targeted CPU is not ready, let's target
-		 * the first CPU now. For non-SMP systems this
-		 * will check the same CPU twice.
-		 */
-		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
-				"ready for incoming skb- using first online "
-				"CPU.\n");
-
-		spin_unlock(&fps->fcoe_rx_list.lock);
-		cpu = cpumask_first(cpu_online_mask);
-		fps = &per_cpu(fcoe_percpu, cpu);
-		spin_lock(&fps->fcoe_rx_list.lock);
-		if (!fps->thread) {
-			spin_unlock(&fps->fcoe_rx_list.lock);
-			goto err;
-		}
-	}
-
	/*
	 * We now have a valid CPU that we're targeting for
	 * this skb. We also have this receive thread locked,
@@ -1543,8 +1382,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
	 * in softirq context.
	 */
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
-	if (fps->thread->state == TASK_INTERRUPTIBLE)
-		wake_up_process(fps->thread);
+	schedule_work_on(cpu, &fps->work);
	spin_unlock(&fps->fcoe_rx_list.lock);
 
	return NET_RX_SUCCESS;
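For clarity, the producer side above reduces to: append the skb under the rx-list lock, then kick the per-CPU work item. schedule_work_on() may be called from this softirq context and is a no-op when the work item is already pending, so a burst of frames queues the worker at most once. A hedged sketch of just that pattern, reusing the fcoe_percpu_s layout sketched earlier (example_queue_rx() is an illustrative name):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/workqueue.h>

	/* Queue one received skb for the per-CPU worker; the caller picked 'cpu'. */
	static int example_queue_rx(struct fcoe_percpu_s *fps, unsigned int cpu,
				    struct sk_buff *skb)
	{
		spin_lock(&fps->fcoe_rx_list.lock);	/* plain lock: already in softirq */
		__skb_queue_tail(&fps->fcoe_rx_list, skb);
		schedule_work_on(cpu, &fps->work);	/* no-op if already pending */
		spin_unlock(&fps->fcoe_rx_list.lock);

		return NET_RX_SUCCESS;
	}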
@@ -1712,15 +1550,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
	return 0;
 }
 
-/**
- * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
- * @skb: The completed skb (argument required by destructor)
- */
-static void fcoe_percpu_flush_done(struct sk_buff *skb)
-{
-	complete(&fcoe_flush_completion);
-}
-
 /**
  * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
  * @lport: The local port the frame was received on
@@ -1792,8 +1621,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
	fr = fcoe_dev_from_skb(skb);
	lport = fr->fr_dev;
	if (unlikely(!lport)) {
-		if (skb->destructor != fcoe_percpu_flush_done)
-			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
+		FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
		kfree_skb(skb);
		return;
	}
@@ -1857,40 +1685,28 @@ drop:
 }
 
 /**
- * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
- * @arg: The per-CPU context
+ * fcoe_receive_work() - The per-CPU worker
+ * @work: The work struct
  *
- * Return: 0 for success
  */
-static int fcoe_percpu_receive_thread(void *arg)
+static void fcoe_receive_work(struct work_struct *work)
 {
-	struct fcoe_percpu_s *p = arg;
+	struct fcoe_percpu_s *p;
	struct sk_buff *skb;
	struct sk_buff_head tmp;
 
+	p = container_of(work, struct fcoe_percpu_s, work);
	skb_queue_head_init(&tmp);
 
-	set_user_nice(current, MIN_NICE);
-
-	while (!kthread_should_stop()) {
-
-		spin_lock_bh(&p->fcoe_rx_list.lock);
-		skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
-
-		if (!skb_queue_len(&tmp)) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_bh(&p->fcoe_rx_list.lock);
-			schedule();
-			continue;
-		}
-
-		spin_unlock_bh(&p->fcoe_rx_list.lock);
+	spin_lock_bh(&p->fcoe_rx_list.lock);
+	skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+	spin_unlock_bh(&p->fcoe_rx_list.lock);
 
-		while ((skb = __skb_dequeue(&tmp)) != NULL)
-			fcoe_recv_frame(skb);
+	if (!skb_queue_len(&tmp))
+		return;
 
-	}
-	return 0;
+	while ((skb = __skb_dequeue(&tmp)))
+		fcoe_recv_frame(skb);
 }
 
 /**
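One behavioural difference worth noting in this hunk: the removed kthread ran at MIN_NICE, while fcoe_receive_work() is now scheduled via schedule_work_on() and runs on the system workqueue at normal kworker priority. The patch does not address this; if receive latency under load ever mattered, one option would be a dedicated high-priority per-CPU workqueue. The sketch below is that alternative, not part of this patch; the queue name "fcoe_rx" and the fcoe_rx_wq variable are hypothetical.

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *fcoe_rx_wq;	/* hypothetical, not in this patch */

	static int example_create_rx_wq(void)
	{
		/* WQ_HIGHPRI backs the per-CPU work items with a higher-priority
		 * kworker pool instead of the default one used by schedule_work_on().
		 */
		fcoe_rx_wq = alloc_workqueue("fcoe_rx", WQ_HIGHPRI, 0);
		if (!fcoe_rx_wq)
			return -ENOMEM;
		return 0;
	}

	/* The queueing side would then call queue_work_on() instead of
	 * schedule_work_on(), e.g.:
	 *	queue_work_on(cpu, fcoe_rx_wq, &fps->work);
	 */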
@@ -2450,36 +2266,19 @@ static int fcoe_link_ok(struct fc_lport *lport)
  *
  * Must be called with fcoe_create_mutex held to single-thread completion.
  *
- * This flushes the pending skbs by adding a new skb to each queue and
- * waiting until they are all freed. This assures us that not only are
- * there no packets that will be handled by the lport, but also that any
- * threads already handling packet have returned.
+ * This flushes the pending skbs by flushing the work item for each CPU. The
+ * work item on each possible CPU is flushed because we may have used the
+ * per-CPU struct of an offline CPU.
  */
 static void fcoe_percpu_clean(struct fc_lport *lport)
 {
	struct fcoe_percpu_s *pp;
-	struct sk_buff *skb;
	unsigned int cpu;
 
	for_each_possible_cpu(cpu) {
		pp = &per_cpu(fcoe_percpu, cpu);
 
-		if (!pp->thread || !cpu_online(cpu))
-			continue;
-
-		skb = dev_alloc_skb(0);
-		if (!skb)
-			continue;
-
-		skb->destructor = fcoe_percpu_flush_done;
-
-		spin_lock_bh(&pp->fcoe_rx_list.lock);
-		__skb_queue_tail(&pp->fcoe_rx_list, skb);
-		if (pp->fcoe_rx_list.qlen == 1)
-			wake_up_process(pp->thread);
-		spin_unlock_bh(&pp->fcoe_rx_list.lock);
-
-		wait_for_completion(&fcoe_flush_completion);
+		flush_work(&pp->work);
	}
 }
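The flush loop above relies on a simple workqueue guarantee: flush_work() waits for a pending or executing instance of the work item to complete, and it is safe to call even if the work was never queued. Walking the possible mask rather than the online mask covers per-CPU structs that were used while their CPU was online but has since gone down. A hedged sketch of the same quiesce step, assuming the per-CPU layout sketched earlier (example_quiesce_rx() is an illustrative name):

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/workqueue.h>

	/* Quiesce the receive path: wait for any pending or running per-CPU receive
	 * work to finish.  Every possible CPU is visited, not just the online ones,
	 * because an offline CPU's per-CPU struct may still hold queued state.
	 */
	static void example_quiesce_rx(void)
	{
		unsigned int cpu;

		for_each_possible_cpu(cpu)
			flush_work(&per_cpu_ptr(&fcoe_percpu, cpu)->work);
	}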
@@ -2625,22 +2424,11 @@ static int __init fcoe_init(void)
	mutex_lock(&fcoe_config_mutex);
 
	for_each_possible_cpu(cpu) {
-		p = &per_cpu(fcoe_percpu, cpu);
+		p = per_cpu_ptr(&fcoe_percpu, cpu);
+		INIT_WORK(&p->work, fcoe_receive_work);
		skb_queue_head_init(&p->fcoe_rx_list);
	}
 
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(cpu)
-		fcoe_percpu_thread_create(cpu);
-
-	/* Initialize per CPU interrupt thread */
-	rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
-	if (rc)
-		goto out_free;
-
-	cpu_notifier_register_done();
-
	/* Setup link change notification */
	fcoe_dev_setup();
@@ -2652,12 +2440,6 @@ static int __init fcoe_init(void)
	return 0;
 
 out_free:
-	for_each_online_cpu(cpu) {
-		fcoe_percpu_thread_destroy(cpu);
-	}
-
-	cpu_notifier_register_done();
-
	mutex_unlock(&fcoe_config_mutex);
	destroy_workqueue(fcoe_wq);
	return rc;
@@ -2690,14 +2472,8 @@ static void __exit fcoe_exit(void)
	}
	rtnl_unlock();
 
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(cpu)
-		fcoe_percpu_thread_destroy(cpu);
-
-	__unregister_hotcpu_notifier(&fcoe_cpu_notifier);
-
-	cpu_notifier_register_done();
+	for_each_possible_cpu(cpu)
+		fcoe_thread_cleanup_local(cpu);
 
	mutex_unlock(&fcoe_config_mutex);