@@ -210,6 +210,10 @@
 #define T_REMDEVALL (1<<2) /* Remove all devs */
 #define T_REMDEV (1<<3) /* Remove one dev */
 
+/* Xmit modes */
+#define M_START_XMIT 0 /* Default normal TX */
+#define M_NETIF_RECEIVE 1 /* Inject packets into stack */
+
 /* If lock -- protects updating of if_list */
 #define if_lock(t) spin_lock(&(t->if_lock));
 #define if_unlock(t) spin_unlock(&(t->if_lock));
@@ -251,13 +255,14 @@ struct pktgen_dev {
 	 * we will do a random selection from within the range.
 	 */
 	__u32 flags;
-	int removal_mark;	/* non-zero => the device is marked for
-				 * removal by worker thread */
-
+	int xmit_mode;
 	int min_pkt_size;
 	int max_pkt_size;
 	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
 	int nfrags;
+	int removal_mark;	/* non-zero => the device is marked for
+				 * removal by worker thread */
+
 	struct page *page;
 	u64 delay;	/* nano-seconds */
 
@@ -620,6 +625,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 	if (pkt_dev->node >= 0)
 		seq_printf(seq, " node: %d\n", pkt_dev->node);
 
+	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
+		seq_puts(seq, " xmit_mode: netif_receive\n");
+
 	seq_puts(seq, " Flags: ");
 
 	if (pkt_dev->flags & F_IPV6)
@@ -1081,7 +1089,8 @@ static ssize_t pktgen_if_write(struct file *file,
 		if (len < 0)
 			return len;
 		if ((value > 0) &&
-		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
+		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
 			return -ENOTSUPP;
 		i += len;
 		pkt_dev->clone_skb = value;
@@ -1134,7 +1143,7 @@ static ssize_t pktgen_if_write(struct file *file,
 			return len;
 
 		i += len;
-		if ((value > 1) &&
+		if ((value > 1) && (pkt_dev->xmit_mode == M_START_XMIT) &&
 		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
 			return -ENOTSUPP;
 		pkt_dev->burst = value < 1 ? 1 : value;
@@ -1160,6 +1169,35 @@ static ssize_t pktgen_if_write(struct file *file,
 		sprintf(pg_result, "ERROR: node not possible");
 		return count;
 	}
+	if (!strcmp(name, "xmit_mode")) {
+		char f[32];
+
+		memset(f, 0, 32);
+		len = strn_len(&user_buffer[i], sizeof(f) - 1);
+		if (len < 0)
+			return len;
+
+		if (copy_from_user(f, &user_buffer[i], len))
+			return -EFAULT;
+		i += len;
+
+		if (strcmp(f, "start_xmit") == 0) {
+			pkt_dev->xmit_mode = M_START_XMIT;
+		} else if (strcmp(f, "netif_receive") == 0) {
+			/* clone_skb set earlier, not supported in this mode */
+			if (pkt_dev->clone_skb > 0)
+				return -ENOTSUPP;
+
+			pkt_dev->xmit_mode = M_NETIF_RECEIVE;
+		} else {
+			sprintf(pg_result,
+				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
+				f, "start_xmit, netif_receive\n");
+			return count;
+		}
+		sprintf(pg_result, "OK: xmit_mode=%s", f);
+		return count;
+	}
 	if (!strcmp(name, "flag")) {
 		char f[32];
 		memset(f, 0, 32);
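
The hunk above wires the new xmit_mode command into pktgen's existing per-device /proc interface, so it is driven the same way as count, delay or burst: by writing a text command to the device's control file. Below is a minimal userspace sketch of that, not part of this patch; the path /proc/net/pktgen/eth0 is illustrative and assumes the interface has already been added to a pktgen thread. Note that the parser rejects the mode with -ENOTSUPP if clone_skb was set earlier, and writing "xmit_mode start_xmit" to the same file switches back to the default.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Per-device pktgen control file; "eth0" is only an example. */
	const char *dev_file = "/proc/net/pktgen/eth0";
	const char *cmd = "xmit_mode netif_receive\n";
	int fd = open(dev_file, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}
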
@@ -3320,6 +3358,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
 	struct net_device *odev = pkt_dev->odev;
 	struct netdev_queue *txq;
+	struct sk_buff *skb;
 	int ret;
 
 	/* If device is offline, then don't send */
@@ -3357,6 +3396,38 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->delay && pkt_dev->last_ok)
 		spin(pkt_dev, pkt_dev->next_tx);
 
+	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
+		skb = pkt_dev->skb;
+		skb->protocol = eth_type_trans(skb, skb->dev);
+		atomic_add(burst, &skb->users);
+		local_bh_disable();
+		do {
+			ret = netif_receive_skb(skb);
+			if (ret == NET_RX_DROP)
+				pkt_dev->errors++;
+			pkt_dev->sofar++;
+			pkt_dev->seq_num++;
+			if (atomic_read(&skb->users) != burst) {
+				/* skb was queued by rps/rfs or taps,
+				 * so cannot reuse this skb
+				 */
+				atomic_sub(burst - 1, &skb->users);
+				/* get out of the loop and wait
+				 * until skb is consumed
+				 */
+				pkt_dev->last_ok = 1;
+				break;
+			}
+			/* skb was 'freed' by stack, so clean few
+			 * bits and reuse it
+			 */
+#ifdef CONFIG_NET_CLS_ACT
+			skb->tc_verd = 0; /* reset reclass/redir ttl */
+#endif
+		} while (--burst > 0);
+		goto out; /* Skips xmit_mode M_START_XMIT */
+	}
+
 	txq = skb_get_tx_queue(odev, pkt_dev->skb);
 
 	local_bh_disable();
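
The injection loop above pre-charges skb->users with burst extra references so the same skb can be pushed into the stack repeatedly without reallocation; only when the stack keeps a reference (the skb was queued by RPS/RFS or a tap) does pktgen give back the unused references and stop reusing the buffer. Below is a standalone userspace sketch of that refcount pattern using C11 atomics; struct buf and consume() are hypothetical stand-ins, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
	atomic_int users;	/* analogue of skb->users */
	int injected;
};

/* Stand-in for netif_receive_skb(): the consumer normally drops the
 * reference it was handed; when keep_ref is set it "queues" the buffer
 * and keeps that reference instead.
 */
static void consume(struct buf *b, bool keep_ref)
{
	b->injected++;
	if (!keep_ref)
		atomic_fetch_sub(&b->users, 1);
}

int main(void)
{
	struct buf b;
	int burst = 4;

	b.injected = 0;
	atomic_init(&b.users, 1);		/* the sender's own reference */
	atomic_fetch_add(&b.users, burst);	/* pre-charge one ref per planned injection */

	do {
		consume(&b, burst == 2);	/* pretend the third round queues the buffer */

		if (atomic_load(&b.users) != burst) {
			/* consumer kept its reference: return the refs that
			 * were pre-charged for the remaining rounds and stop
			 * reusing the buffer
			 */
			atomic_fetch_sub(&b.users, burst - 1);
			break;
		}
		/* consumer dropped its reference, safe to reuse the buffer */
	} while (--burst > 0);

	printf("injected %d times, users now %d\n",
	       b.injected, atomic_load(&b.users));
	return 0;
}
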
@@ -3404,6 +3475,7 @@ xmit_more:
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
 
+out:
 	local_bh_enable();
 
 	/* If pkt_dev->count is zero, then run forever */