@@ -4227,9 +4227,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
         napi->weight = weight_p;
         local_irq_disable();
-        while (work < quota) {
+        while (1) {
                 struct sk_buff *skb;
-                unsigned int qlen;
 
                 while ((skb = __skb_dequeue(&sd->process_queue))) {
                         local_irq_enable();
@@ -4243,24 +4242,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
                 }
 
                 rps_lock(sd);
-                qlen = skb_queue_len(&sd->input_pkt_queue);
-                if (qlen)
-                        skb_queue_splice_tail_init(&sd->input_pkt_queue,
-                                                   &sd->process_queue);
-
-                if (qlen < quota - work) {
+                if (skb_queue_empty(&sd->input_pkt_queue)) {
                         /*
                          * Inline a custom version of __napi_complete().
                          * only current cpu owns and manipulates this napi,
-                         * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-                         * we can use a plain write instead of clear_bit(),
+                         * and NAPI_STATE_SCHED is the only possible flag set
+                         * on backlog.
+                         * We can use a plain write instead of clear_bit(),
                          * and we dont need an smp_mb() memory barrier.
                          */
                         list_del(&napi->poll_list);
                         napi->state = 0;
+                        rps_unlock(sd);
 
-                        quota = work + qlen;
+                        break;
                 }
+
+                skb_queue_splice_tail_init(&sd->input_pkt_queue,
+                                           &sd->process_queue);
                 rps_unlock(sd);
         }
         local_irq_enable();
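
For readability, here is a sketch of how the patched backlog poll loop reads end to end. The body of the inner dequeue loop sits between the two hunks and is not part of this diff, so it is only summarized in comments; everything else follows the added and context lines above.

static int process_backlog(struct napi_struct *napi, int quota)
{
        /* ... local variable setup and RPS IPI handling elided ... */

        napi->weight = weight_p;
        local_irq_disable();
        while (1) {
                struct sk_buff *skb;

                /* Drain the private process_queue, re-enabling IRQs around
                 * each packet.  The per-skb work between the two hunks is
                 * elided; with the outer "work < quota" test gone, the quota
                 * is presumably enforced inside that elided body.
                 */
                while ((skb = __skb_dequeue(&sd->process_queue))) {
                        local_irq_enable();
                        /* ... deliver skb, re-disable IRQs, count work ... */
                }

                rps_lock(sd);
                if (skb_queue_empty(&sd->input_pkt_queue)) {
                        /* Open-coded __napi_complete(): only this CPU touches
                         * the backlog napi and NAPI_STATE_SCHED is its only
                         * possible flag, so a plain write with no memory
                         * barrier is enough.
                         */
                        list_del(&napi->poll_list);
                        napi->state = 0;
                        rps_unlock(sd);

                        break;
                }

                /* More packets arrived meanwhile: move them onto
                 * process_queue in one splice and go around again.
                 */
                skb_queue_splice_tail_init(&sd->input_pkt_queue,
                                           &sd->process_queue);
                rps_unlock(sd);
        }
        local_irq_enable();

        /* ... return the amount of work done (outside this diff) ... */
}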