|
@@ -135,6 +135,58 @@ enum i40e_dyn_idx_t {
|
|
|
#define I40E_RX_DMA_ATTR \
|
|
|
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
|
|
|
|
|
|
+/* Attempt to maximize the headroom available for incoming frames. We
|
|
|
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
|
|
|
+ * the frame. This leaves us with 512 bytes of room. From that we need
|
|
|
+ * to deduct the space needed for the shared info and the padding needed
|
|
|
+ * to IP align the frame.
|
|
|
+ *
|
|
|
+ * Note: For cache line sizes 256 or larger this value is going to end
|
|
|
+ * up negative. In these cases we should fall back to the legacy
|
|
|
+ * receive path.
|
|
|
+ */
|
|
|
+#if (PAGE_SIZE < 8192)
|
|
|
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
|
|
|
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
|
|
|
+
|
|
|
+/* i40e_compute_pad - headroom left over for a given Rx buffer length
+ * @rx_buf_len: bytes reserved for the received frame data
+ *
+ * Round @rx_buf_len up to the half page it will occupy, deduct the
+ * skb shared-info overhead via SKB_WITH_OVERHEAD(), and return what
+ * remains of the half page as bytes usable for headroom padding.
+ */
+static inline int i40e_compute_pad(int rx_buf_len)
|
|
|
+{
|
|
|
+ int page_size, pad_size;
|
|
|
+
|
|
|
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
|
|
|
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
|
|
|
+
|
|
|
+ return pad_size;
|
|
|
+}
|
|
|
+
|
|
|
+/* i40e_skb_pad - headroom to reserve in front of build_skb() Rx buffers
+ *
+ * Pick the Rx buffer length the driver will actually use (1536, or 3K
+ * plus alignment slack when I40E_2K_TOO_SMALL_WITH_PADDING holds),
+ * carve NET_IP_ALIGN out of it, and return the padding
+ * i40e_compute_pad() derives for that length.
+ */
+static inline int i40e_skb_pad(void)
|
|
|
+{
|
|
|
+ int rx_buf_len;
|
|
|
+
|
|
|
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
|
|
|
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
|
|
|
+ *
|
|
|
+ * For a 3K buffer we need to add enough padding to allow for
|
|
|
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
|
|
|
+ * cache-line alignment.
|
|
|
+ */
|
|
|
+ if (I40E_2K_TOO_SMALL_WITH_PADDING)
|
|
|
+ rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
|
|
|
+ else
|
|
|
+ rx_buf_len = I40E_RXBUFFER_1536;
|
|
|
+
|
|
|
+ /* if needed make room for NET_IP_ALIGN */
|
|
|
+ rx_buf_len -= NET_IP_ALIGN;
|
|
|
+
|
|
|
+ return i40e_compute_pad(rx_buf_len);
|
|
|
+}
|
|
|
+
|
|
|
+#define I40E_SKB_PAD i40e_skb_pad()
|
|
|
+#else
|
|
|
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
|
|
|
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
|
|
|
+#endif
|
|
|
+
|
|
|
/**
|
|
|
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
|
|
|
* @rx_desc: pointer to receive descriptor (in le64 format)
|
|
@@ -341,7 +393,8 @@ struct i40e_ring {
|
|
|
u8 packet_stride;
|
|
|
|
|
|
u16 flags;
|
|
|
-#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
|
|
|
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
|
|
|
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
|
|
|
|
|
|
/* stats structs */
|
|
|
struct i40e_queue_stats stats;
|
|
@@ -369,6 +422,21 @@ struct i40e_ring {
|
|
|
*/
|
|
|
} ____cacheline_internodealigned_in_smp;
|
|
|
|
|
|
+/* ring_uses_build_skb - report whether @ring has the build_skb Rx path enabled */
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
|
|
|
+{
|
|
|
+ return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
|
|
|
+}
|
|
|
+
|
|
|
+/* set_ring_build_skb_enabled - flag @ring to use the build_skb Rx path */
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
|
|
|
+{
|
|
|
+ ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
|
|
|
+}
|
|
|
+
|
|
|
+/* clear_ring_build_skb_enabled - revert @ring to the legacy (non-build_skb) Rx path */
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
|
|
|
+{
|
|
|
+ ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
|
|
|
+}
|
|
|
+
|
|
|
enum i40e_latency_range {
|
|
|
I40E_LOWEST_LATENCY = 0,
|
|
|
I40E_LOW_LATENCY = 1,
|