|
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
|
|
|
|
|
|
#define MAX_PENDING_REQS 256
|
|
|
|
|
|
+/* It's possible for an skb to have a maximal number of frags
|
|
|
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
|
|
|
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
|
|
|
+ * ring slot.
|
|
|
+ */
|
|
|
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
|
|
|
+
|
|
|
struct xenvif {
|
|
|
/* Unique identifier for this interface. */
|
|
|
domid_t domid;
|
|
@@ -143,13 +150,13 @@ struct xenvif {
|
|
|
*/
|
|
|
RING_IDX rx_req_cons_peek;
|
|
|
|
|
|
- /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
|
|
|
- * head/fragment page uses 2 copy operations because it
|
|
|
- * straddles two buffers in the frontend.
|
|
|
- */
|
|
|
- struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
|
|
|
- struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
|
|
|
+ /* This array is allocated separately as it is large */
|
|
|
+ struct gnttab_copy *grant_copy_op;
|
|
|
|
|
|
+ /* We create one meta structure per ring request we consume, so
|
|
|
+ * the maximum number is the same as the ring size.
|
|
|
+ */
|
|
|
+ struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
|
|
|
|
|
|
u8 fe_dev_addr[6];
|
|
|
|