|
@@ -4372,7 +4372,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
|
|
|
* __napi_schedule - schedule for receive
|
|
|
* @n: entry to schedule
|
|
|
*
|
|
|
- * The entry's receive function will be scheduled to run
|
|
|
+ * The entry's receive function will be scheduled to run.
|
|
|
+ * Consider using __napi_schedule_irqoff() if hard irqs are masked.
|
|
|
*/
|
|
|
void __napi_schedule(struct napi_struct *n)
|
|
|
{
|
|
@@ -4384,6 +4385,18 @@ void __napi_schedule(struct napi_struct *n)
|
|
|
}
|
|
|
EXPORT_SYMBOL(__napi_schedule);
|
|
|
|
|
|
+/**
|
|
|
+ * __napi_schedule_irqoff - schedule for receive
|
|
|
+ * @n: entry to schedule
|
|
|
+ *
|
|
|
+ * Variant of __napi_schedule(); caller must have hard irqs masked already
|
|
|
+ */
|
|
|
+void __napi_schedule_irqoff(struct napi_struct *n)
|
|
|
+{
|
|
|
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(__napi_schedule_irqoff);
|
|
|
+
|
|
|
void __napi_complete(struct napi_struct *n)
|
|
|
{
|
|
|
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
|