vnic_rq.h

/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
        u64 ring_base;                  /* 0x00 */
        u32 ring_size;                  /* 0x08 */
        u32 pad0;
        u32 posted_index;               /* 0x10 */
        u32 pad1;
        u32 cq_index;                   /* 0x18 */
        u32 pad2;
        u32 enable;                     /* 0x20 */
        u32 pad3;
        u32 running;                    /* 0x28 */
        u32 pad4;
        u32 fetch_index;                /* 0x30 */
        u32 pad5;
        u32 error_interrupt_enable;     /* 0x38 */
        u32 pad6;
        u32 error_interrupt_offset;     /* 0x40 */
        u32 pad7;
        u32 error_status;               /* 0x48 */
        u32 pad8;
        u32 dropped_packet_count;       /* 0x50 */
        u32 pad9;
        u32 dropped_packet_count_rc;    /* 0x58 */
        u32 pad10;
};
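
/*
 * Each of the 32-bit registers above is followed by a u32 pad, so every
 * register occupies an 8-byte slot in the device BAR; the 0xNN annotations
 * are the resulting byte offsets.  A sketch of how the block is programmed
 * (see vnic_rq.c for the authoritative sequence): vnic_rq_init() writes
 * ring_base, ring_size, cq_index and the error_interrupt_* fields and resets
 * fetch_index/posted_index, vnic_rq_enable() sets enable, and
 * vnic_rq_disable() clears enable and waits for running to drop.
 */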

/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
        ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
        VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
        (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
        DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
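
/*
 * Worked example: at the 4096-descriptor maximum, VNIC_RQ_BUF_BLK_ENTRIES
 * evaluates to 64, so VNIC_RQ_BUF_BLKS_MAX = DIV_ROUND_UP(4096, 64) = 64
 * blocks; a small ring of, say, 32 descriptors instead uses a single
 * 32-entry block.  Each block is one VNIC_RQ_BUF_BLK_SZ()-sized allocation,
 * which avoids having to allocate the per-buffer bookkeeping for a large
 * ring in one contiguous chunk.
 */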

struct vnic_rq_buf {
        struct vnic_rq_buf *next;
        dma_addr_t dma_addr;
        void *os_buf;
        unsigned int os_buf_index;
        unsigned int len;
        unsigned int index;
        void *desc;
        uint64_t wr_id;
};
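
/*
 * One vnic_rq_buf (above) shadows one receive descriptor: os_buf points at
 * the driver-owned buffer (an sk_buff in the enic driver), dma_addr and len
 * describe the mapping handed to hardware, desc points at the matching slot
 * in the descriptor ring, wr_id is an opaque caller-supplied tag, and next
 * links the entries into the circular list that to_use/to_clean walk.
 */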

struct vnic_rq {
        unsigned int index;
        struct vnic_dev *vdev;
        struct vnic_rq_ctrl __iomem *ctrl;      /* memory-mapped */
        struct vnic_dev_ring ring;
        struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
        struct vnic_rq_buf *to_use;
        struct vnic_rq_buf *to_clean;
        void *os_buf_head;
        unsigned int pkts_outstanding;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define ENIC_POLL_STATE_IDLE            0
#define ENIC_POLL_STATE_NAPI            (1 << 0) /* NAPI owns this poll */
#define ENIC_POLL_STATE_POLL            (1 << 1) /* poll owns this poll */
#define ENIC_POLL_STATE_NAPI_YIELD      (1 << 2) /* NAPI yielded this poll */
#define ENIC_POLL_STATE_POLL_YIELD      (1 << 3) /* poll yielded this poll */
#define ENIC_POLL_YIELD                 (ENIC_POLL_STATE_NAPI_YIELD | \
                                         ENIC_POLL_STATE_POLL_YIELD)
#define ENIC_POLL_LOCKED                (ENIC_POLL_STATE_NAPI | \
                                         ENIC_POLL_STATE_POLL)
#define ENIC_POLL_USER_PEND             (ENIC_POLL_STATE_POLL | \
                                         ENIC_POLL_STATE_POLL_YIELD)
        unsigned int bpoll_state;
        spinlock_t bpoll_lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};
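
/*
 * In struct vnic_rq above, bufs[] holds one pointer per allocated block of
 * vnic_rq_buf entries, while to_use and to_clean chase each other around the
 * circular list those entries form: to_use is the next descriptor software
 * will post, to_clean the next one expected back from hardware.  The bpoll_*
 * fields (busy-poll builds only) arbitrate ownership of the queue between
 * NAPI and busy-polling sockets using the ENIC_POLL_STATE_* bits.
 */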

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
        /* how many does SW own? */
        return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
        /* how many does HW own? */
        return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
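
/*
 * The "- 1" in vnic_rq_desc_used() reflects that one descriptor is always
 * held back from hardware, the usual ring trick that keeps a completely full
 * ring distinguishable from an empty one; the ring setup code initialises
 * desc_avail to desc_count - 1 accordingly.
 */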

static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
        return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
        return rq->to_use->index;
}

static inline void vnic_rq_post(struct vnic_rq *rq,
        void *os_buf, unsigned int os_buf_index,
        dma_addr_t dma_addr, unsigned int len,
        uint64_t wrid)
{
        struct vnic_rq_buf *buf = rq->to_use;

        buf->os_buf = os_buf;
        buf->os_buf_index = os_buf_index;
        buf->dma_addr = dma_addr;
        buf->len = len;
        buf->wr_id = wrid;

        buf = buf->next;
        rq->to_use = buf;
        rq->ring.desc_avail--;

        /* Move the posted_index every nth descriptor */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
#endif

        if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
                /* Adding write memory barrier prevents compiler and/or CPU
                 * reordering, thus avoiding descriptor posting before
                 * descriptor is initialized. Otherwise, hardware can read
                 * stale descriptor fields.
                 */
                wmb();
                iowrite32(buf->index, &rq->ctrl->posted_index);
        }
}
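
/*
 * Sketch of how a buffer-allocation callback is expected to drive the helpers
 * above (illustrative only; my_rq_alloc_buf, netdev, pdev and buf_len are
 * placeholders, and the descriptor encode step assumes rq_enet_desc_enc()
 * from rq_enet_desc.h; compare enic_rq_alloc_buf() in enic_main.c for the
 * driver's real version):
 *
 *      static int my_rq_alloc_buf(struct vnic_rq *rq)
 *      {
 *              struct sk_buff *skb;
 *              dma_addr_t dma;
 *
 *              skb = netdev_alloc_skb_ip_align(netdev, buf_len);
 *              if (!skb)
 *                      return -ENOMEM;
 *              dma = pci_map_single(pdev, skb->data, buf_len,
 *                                   PCI_DMA_FROMDEVICE);
 *              rq_enet_desc_enc(vnic_rq_next_desc(rq), dma,
 *                               RQ_ENET_TYPE_ONLY_SOP, buf_len);
 *              vnic_rq_post(rq, skb, 0, dma, buf_len, 0);
 *              return 0;
 *      }
 *
 * vnic_rq_post() itself only records the buffer and advances to_use; the
 * posted_index doorbell is written once every VNIC_RQ_RETURN_RATE + 1
 * descriptors to keep MMIO writes off the per-packet path.
 */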

static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
        rq->ring.desc_avail += count;
}
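
/*
 * Descriptor-return policy for vnic_rq_service(): VNIC_RQ_RETURN_DESC hands
 * each serviced descriptor straight back to the available pool, while
 * VNIC_RQ_DEFER_RETURN_DESC leaves that to the caller, which later returns a
 * batch through vnic_rq_return_descs().
 */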
enum desc_return_options {
        VNIC_RQ_RETURN_DESC,
        VNIC_RQ_DEFER_RETURN_DESC,
};

static inline void vnic_rq_service(struct vnic_rq *rq,
        struct cq_desc *cq_desc, u16 completed_index,
        int desc_return, void (*buf_service)(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque), void *opaque)
{
        struct vnic_rq_buf *buf;
        int skipped;

        buf = rq->to_clean;
        while (1) {

                skipped = (buf->index != completed_index);

                (*buf_service)(rq, cq_desc, buf, skipped, opaque);

                if (desc_return == VNIC_RQ_RETURN_DESC)
                        rq->ring.desc_avail++;

                rq->to_clean = buf->next;

                if (!skipped)
                        break;

                buf = rq->to_clean;
        }
}
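
/*
 * A completion names only the index of the last descriptor it covers, so
 * vnic_rq_service() walks forward from to_clean and flags every earlier
 * entry as skipped.  Example: with to_clean at index 5 and completed_index 7,
 * buf_service() runs for 5 and 6 with skipped = 1, then for 7 with
 * skipped = 0, and to_clean is left at index 8.
 */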

static inline int vnic_rq_fill(struct vnic_rq *rq,
        int (*buf_fill)(struct vnic_rq *rq))
{
        int err;

        while (vnic_rq_desc_avail(rq) > 0) {

                err = (*buf_fill)(rq);
                if (err)
                        return err;
        }

        return 0;
}
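
/*
 * vnic_rq_fill() is the ring pre-fill loop: the driver calls it at open time
 * with its buffer-allocation callback (enic_rq_alloc_buf() in the enic
 * driver) until desc_avail reaches zero, or propagates the callback's error
 * so the caller can back out.
 */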

#ifdef CONFIG_NET_RX_BUSY_POLL
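/*
 * Busy-poll locking protocol (a sketch; enic_main.c shows the exact usage):
 * at most one of NAPI or a busy-polling socket owns the RQ at a time.
 * Ownership lives in bpoll_state; bpoll_lock is held only long enough to
 * flip the state bits.  A contender records a *_YIELD bit and backs off
 * instead of spinning, the unlock helpers report whether a yield was seen
 * while the queue was held, and enic_poll_busy_polling() (valid only while
 * the RQ is locked, hence the WARN_ON) tells NAPI whether a socket poller
 * owns the queue or is pending.
 */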

static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
        spin_lock_init(&rq->bpoll_lock);
        rq->bpoll_state = ENIC_POLL_STATE_IDLE;
}

static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
        bool rc = true;

        spin_lock(&rq->bpoll_lock);
        if (rq->bpoll_state & ENIC_POLL_LOCKED) {
                WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
                rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
                rc = false;
        } else {
                rq->bpoll_state = ENIC_POLL_STATE_NAPI;
        }
        spin_unlock(&rq->bpoll_lock);

        return rc;
}

static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
{
        bool rc = false;

        spin_lock(&rq->bpoll_lock);
        WARN_ON(rq->bpoll_state &
                (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
        if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
                rc = true;
        rq->bpoll_state = ENIC_POLL_STATE_IDLE;
        spin_unlock(&rq->bpoll_lock);

        return rc;
}

static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
        bool rc = true;

        spin_lock_bh(&rq->bpoll_lock);
        if (rq->bpoll_state & ENIC_POLL_LOCKED) {
                rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
                rc = false;
        } else {
                rq->bpoll_state |= ENIC_POLL_STATE_POLL;
        }
        spin_unlock_bh(&rq->bpoll_lock);

        return rc;
}

static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
{
        bool rc = false;

        spin_lock_bh(&rq->bpoll_lock);
        WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
        if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
                rc = true;
        rq->bpoll_state = ENIC_POLL_STATE_IDLE;
        spin_unlock_bh(&rq->bpoll_lock);

        return rc;
}

static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
        WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
        return rq->bpoll_state & ENIC_POLL_USER_PEND;
}

#else

static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
}

static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
        return true;
}

static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
{
        return false;
}

static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
        return false;
}

static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
{
        return false;
}

static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
        return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
        void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
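
/*
 * Expected life cycle of an RQ, pieced together from the helpers above and
 * their definitions in vnic_rq.c (a sketch, not a specification):
 *
 *      vnic_rq_alloc()   - allocate the descriptor ring and vnic_rq_buf blocks
 *      vnic_rq_init()    - program the vnic_rq_ctrl register block
 *      vnic_rq_fill()    - post an initial buffer for every descriptor
 *      vnic_rq_enable()  - let hardware start fetching descriptors
 *      ...                 per-packet vnic_rq_post()/vnic_rq_service()
 *      vnic_rq_disable() - stop the queue (returns an error if it never stops)
 *      vnic_rq_clean()   - hand outstanding buffers back through buf_clean
 *      vnic_rq_free()    - release the ring and buffer blocks
 */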

#endif /* _VNIC_RQ_H_ */