@@ -386,119 +386,87 @@ struct ixgbe_q_vector {
 	char name[IFNAMSIZ + 9];
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	unsigned int state;
-#define IXGBE_QV_STATE_IDLE 0
-#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
-#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
-#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
-#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
-#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
-	spinlock_t lock;
+	atomic_t state;
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 	/* for dynamic allocation of rings associated with this q_vector */
 	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
+
 #ifdef CONFIG_NET_RX_BUSY_POLL
+enum ixgbe_qv_state_t {
+	IXGBE_QV_STATE_IDLE = 0,
+	IXGBE_QV_STATE_NAPI,
+	IXGBE_QV_STATE_POLL,
+	IXGBE_QV_STATE_DISABLE
+};
+
 static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 {
-
-	spin_lock_init(&q_vector->lock);
-	q_vector->state = IXGBE_QV_STATE_IDLE;
+	/* reset state to idle */
+	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
 }
 
 /* called from the device poll routine to get ownership of a q_vector */
 static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 {
-	int rc = true;
-	spin_lock_bh(&q_vector->lock);
-	if (q_vector->state & IXGBE_QV_LOCKED) {
-		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
-		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
-		rc = false;
+	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+				IXGBE_QV_STATE_NAPI);
 #ifdef BP_EXTENDED_STATS
+	if (rc != IXGBE_QV_STATE_IDLE)
 		q_vector->tx.ring->stats.yields++;
 #endif
-	} else {
-		/* we don't care if someone yielded */
-		q_vector->state = IXGBE_QV_STATE_NAPI;
-	}
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+
+	return rc == IXGBE_QV_STATE_IDLE;
 }
 
-/* returns true is someone tried to get the qv while napi had it */
-static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
+static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
 {
-	int rc = false;
-	spin_lock_bh(&q_vector->lock);
-	WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
-				   IXGBE_QV_STATE_NAPI_YIELD));
-
-	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
-		rc = true;
-	/* will reset state to idle, unless QV is disabled */
-	q_vector->state &= IXGBE_QV_STATE_DISABLED;
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+	WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);
+
+	/* flush any outstanding Rx frames */
+	if (q_vector->napi.gro_list)
+		napi_gro_flush(&q_vector->napi, false);
+
+	/* reset state to idle */
+	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
 }
 
 /* called from ixgbe_low_latency_poll() */
 static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
 {
-	int rc = true;
-	spin_lock_bh(&q_vector->lock);
-	if ((q_vector->state & IXGBE_QV_LOCKED)) {
-		q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
-		rc = false;
+	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+				IXGBE_QV_STATE_POLL);
 #ifdef BP_EXTENDED_STATS
-		q_vector->rx.ring->stats.yields++;
+	if (rc != IXGBE_QV_STATE_IDLE)
+		q_vector->tx.ring->stats.yields++;
 #endif
-	} else {
-		/* preserve yield marks */
-		q_vector->state |= IXGBE_QV_STATE_POLL;
-	}
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+	return rc == IXGBE_QV_STATE_IDLE;
 }
 
-/* returns true if someone tried to get the qv while it was locked */
-static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
+static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 {
-	int rc = false;
-	spin_lock_bh(&q_vector->lock);
-	WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI));
-
-	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
-		rc = true;
-	/* will reset state to idle, unless QV is disabled */
-	q_vector->state &= IXGBE_QV_STATE_DISABLED;
-	spin_unlock_bh(&q_vector->lock);
-	return rc;
+	WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);
+
+	/* reset state to idle */
+	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
 }
 
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
-	WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
-	return q_vector->state & IXGBE_QV_USER_PEND;
+	return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
 }
 
 /* false if QV is currently owned */
 static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
 {
-	int rc = true;
-	spin_lock_bh(&q_vector->lock);
-	if (q_vector->state & IXGBE_QV_OWNED)
-		rc = false;
-	q_vector->state |= IXGBE_QV_STATE_DISABLED;
-	spin_unlock_bh(&q_vector->lock);
-
-	return rc;
+	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
+				IXGBE_QV_STATE_DISABLE);
+
+	return rc == IXGBE_QV_STATE_IDLE;
 }
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
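
Taken together, the hunk collapses a spinlock plus a bitmask of owner/yield flags into a single atomic state word: NAPI, busy poll, and the disable path each attempt one atomic compare-and-swap from IDLE, and whoever loses the race simply backs off instead of recording a yield bit. The standalone sketch below mirrors that ownership protocol in portable C11 atomics. It is an illustration only, under assumed names (qv_lock_napi and friends): the kernel code above uses atomic_t and atomic_cmpxchg(), not <stdatomic.h>, and the q_vector here is a stand-in struct.

/* Standalone sketch of the patch's locking protocol using C11 atomics. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum qv_state { QV_IDLE = 0, QV_NAPI, QV_POLL, QV_DISABLE };

struct q_vector {
	atomic_int state;	/* stand-in for the kernel's atomic_t */
};

/* Winner of the IDLE -> NAPI transition owns the vector. */
static bool qv_lock_napi(struct q_vector *qv)
{
	int expected = QV_IDLE;

	/* Compare-and-swap: succeeds only if state was IDLE.  On failure
	 * the caller backs off; nobody records a yield flag any more.
	 */
	return atomic_compare_exchange_strong(&qv->state, &expected, QV_NAPI);
}

static void qv_unlock_napi(struct q_vector *qv)
{
	/* The owner just resets to IDLE; no one can be blocked waiting. */
	atomic_store(&qv->state, QV_IDLE);
}

static bool qv_lock_poll(struct q_vector *qv)
{
	int expected = QV_IDLE;

	return atomic_compare_exchange_strong(&qv->state, &expected, QV_POLL);
}

static void qv_unlock_poll(struct q_vector *qv)
{
	atomic_store(&qv->state, QV_IDLE);
}

int main(void)
{
	struct q_vector qv;

	atomic_init(&qv.state, QV_IDLE);

	if (qv_lock_napi(&qv))
		printf("NAPI owns the vector\n");
	if (!qv_lock_poll(&qv))
		printf("busy poll loses the CAS and backs off\n");
	qv_unlock_napi(&qv);
	if (qv_lock_poll(&qv))
		printf("busy poll now owns the idle vector\n");
	qv_unlock_poll(&qv);
	return 0;
}

The design point is that contention is resolved by backing off rather than waiting: NAPI and busy poll never spin on each other, so the OWNED/LOCKED/YIELD masks and the unlock-side return values all become unnecessary, and disabling a q_vector is just one more compare-and-swap from IDLE.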