@@ -21,6 +21,7 @@
 #define _VNIC_RQ_H_
 
 #include <linux/pci.h>
+#include <linux/netdevice.h>
 
 #include "vnic_dev.h"
 #include "vnic_cq.h"
@@ -75,6 +76,12 @@ struct vnic_rq_buf {
 	uint64_t wr_id;
 };
 
+enum enic_poll_state {
+	ENIC_POLL_STATE_IDLE,
+	ENIC_POLL_STATE_NAPI,
+	ENIC_POLL_STATE_POLL
+};
+
 struct vnic_rq {
 	unsigned int index;
 	struct vnic_dev *vdev;
@@ -86,19 +93,7 @@ struct vnic_rq {
 	void *os_buf_head;
 	unsigned int pkts_outstanding;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define ENIC_POLL_STATE_IDLE		0
-#define ENIC_POLL_STATE_NAPI		(1 << 0) /* NAPI owns this poll */
-#define ENIC_POLL_STATE_POLL		(1 << 1) /* poll owns this poll */
-#define ENIC_POLL_STATE_NAPI_YIELD	(1 << 2) /* NAPI yielded this poll */
-#define ENIC_POLL_STATE_POLL_YIELD	(1 << 3) /* poll yielded this poll */
-#define ENIC_POLL_YIELD			(ENIC_POLL_STATE_NAPI_YIELD |	\
-					 ENIC_POLL_STATE_POLL_YIELD)
-#define ENIC_POLL_LOCKED		(ENIC_POLL_STATE_NAPI |		\
-					 ENIC_POLL_STATE_POLL)
-#define ENIC_POLL_USER_PEND		(ENIC_POLL_STATE_POLL |		\
-					 ENIC_POLL_STATE_POLL_YIELD)
-	unsigned int bpoll_state;
-	spinlock_t bpoll_lock;
+	atomic_t bpoll_state;
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
@@ -215,76 +210,43 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
 #ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
 {
-	spin_lock_init(&rq->bpoll_lock);
-	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
 {
-	bool rc = true;
-
-	spin_lock(&rq->bpoll_lock);
-	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
-		WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
-		rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
-		rc = false;
-	} else {
-		rq->bpoll_state = ENIC_POLL_STATE_NAPI;
-	}
-	spin_unlock(&rq->bpoll_lock);
+	int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
+				ENIC_POLL_STATE_NAPI);
 
-	return rc;
+	return (rc == ENIC_POLL_STATE_IDLE);
 }
 
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+static inline void enic_poll_unlock_napi(struct vnic_rq *rq,
+					 struct napi_struct *napi)
 {
-	bool rc = false;
-
-	spin_lock(&rq->bpoll_lock);
-	WARN_ON(rq->bpoll_state &
-		(ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
-	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
-		rc = true;
-	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
-	spin_unlock(&rq->bpoll_lock);
-
-	return rc;
+	WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI);
+	napi_gro_flush(napi, false);
+	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
 {
-	bool rc = true;
-
-	spin_lock_bh(&rq->bpoll_lock);
-	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
-		rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
-		rc = false;
-	} else {
-		rq->bpoll_state |= ENIC_POLL_STATE_POLL;
-	}
-	spin_unlock_bh(&rq->bpoll_lock);
+	int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
+				ENIC_POLL_STATE_POLL);
 
-	return rc;
+	return (rc == ENIC_POLL_STATE_IDLE);
 }
 
-static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
-{
-	bool rc = false;
-
-	spin_lock_bh(&rq->bpoll_lock);
-	WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
-	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
-		rc = true;
-	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
-	spin_unlock_bh(&rq->bpoll_lock);
-
-	return rc;
+static inline void enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+	WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL);
+	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
 {
-	WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
-	return rq->bpoll_state & ENIC_POLL_USER_PEND;
+	return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL;
 }
 
 #else
@@ -298,7 +260,8 @@ static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
 	return true;
 }
 
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq,
+					 struct napi_struct *napi)
 {
 	return false;
 }