|
@@ -2364,8 +2364,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
-	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+	} else {
+		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+	}
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2378,21 +2383,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
 				  struct ethtool_ringparam *ring)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	int ret;
 
-	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
-	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
-		netdev_err(netdev, "Invalid request.\n");
-		netdev_err(netdev, "Max tx buffers = %llu\n",
-			   adapter->max_rx_add_entries_per_subcrq);
-		netdev_err(netdev, "Max rx buffers = %llu\n",
-			   adapter->max_tx_entries_per_subcrq);
-		return -EINVAL;
-	}
-
+	ret = 0;
 	adapter->desired.rx_entries = ring->rx_pending;
 	adapter->desired.tx_entries = ring->tx_pending;
 
-	return wait_for_reset(adapter);
+	ret = wait_for_reset(adapter);
+
+	if (!ret &&
+	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
+	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
+		netdev_info(netdev,
+			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+			    ring->rx_pending, ring->tx_pending,
+			    adapter->req_rx_add_entries_per_subcrq,
+			    adapter->req_tx_entries_per_subcrq);
+	return ret;
 }
 
 static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2400,8 +2407,14 @@ static void ibmvnic_get_channels(struct net_device *netdev,
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-	channels->max_rx = adapter->max_rx_queues;
-	channels->max_tx = adapter->max_tx_queues;
+	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+		channels->max_rx = adapter->max_rx_queues;
+		channels->max_tx = adapter->max_tx_queues;
+	} else {
+		channels->max_rx = IBMVNIC_MAX_QUEUES;
+		channels->max_tx = IBMVNIC_MAX_QUEUES;
+	}
+
 	channels->max_other = 0;
 	channels->max_combined = 0;
 	channels->rx_count = adapter->req_rx_queues;
@@ -2414,11 +2427,23 @@ static int ibmvnic_set_channels(struct net_device *netdev,
 				struct ethtool_channels *channels)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	int ret;
 
+	ret = 0;
 	adapter->desired.rx_queues = channels->rx_count;
 	adapter->desired.tx_queues = channels->tx_count;
 
-	return wait_for_reset(adapter);
+	ret = wait_for_reset(adapter);
+
+	if (!ret &&
+	    (adapter->req_rx_queues != channels->rx_count ||
+	     adapter->req_tx_queues != channels->tx_count))
+		netdev_info(netdev,
+			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+			    channels->rx_count, channels->tx_count,
+			    adapter->req_rx_queues, adapter->req_tx_queues);
+	return ret;
+
 }
 
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2426,32 +2451,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
 	int i;
 
-	if (stringset != ETH_SS_STATS)
-		return;
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
+				i++, data += ETH_GSTRING_LEN)
+			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
 
-	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
-		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+		for (i = 0; i < adapter->req_tx_queues; i++) {
+			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+			data += ETH_GSTRING_LEN;
 
-	for (i = 0; i < adapter->req_tx_queues; i++) {
-		snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
-		data += ETH_GSTRING_LEN;
+			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+			data += ETH_GSTRING_LEN;
 
-		snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
-		data += ETH_GSTRING_LEN;
+			snprintf(data, ETH_GSTRING_LEN,
+				 "tx%d_dropped_packets", i);
+			data += ETH_GSTRING_LEN;
+		}
 
-		snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
-		data += ETH_GSTRING_LEN;
-	}
+		for (i = 0; i < adapter->req_rx_queues; i++) {
+			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+			data += ETH_GSTRING_LEN;
 
-	for (i = 0; i < adapter->req_rx_queues; i++) {
-		snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
-		data += ETH_GSTRING_LEN;
+			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+			data += ETH_GSTRING_LEN;
 
-		snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
-		data += ETH_GSTRING_LEN;
+			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
 
-		snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
-		data += ETH_GSTRING_LEN;
+	case ETH_SS_PRIV_FLAGS:
+		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
+			strcpy(data + i * ETH_GSTRING_LEN,
+			       ibmvnic_priv_flags[i]);
+		break;
+	default:
+		return;
 	}
 }
 
@@ -2464,6 +2500,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
 		return ARRAY_SIZE(ibmvnic_stats) +
 		       adapter->req_tx_queues * NUM_TX_STATS +
 		       adapter->req_rx_queues * NUM_RX_STATS;
+	case ETH_SS_PRIV_FLAGS:
+		return ARRAY_SIZE(ibmvnic_priv_flags);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2514,6 +2552,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
 	}
 }
 
+static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->priv_flags;
+}
+
+static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
+
+	if (which_maxes)
+		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
+	else
+		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
+
+	return 0;
+}
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
 	.get_drvinfo		= ibmvnic_get_drvinfo,
 	.get_msglevel		= ibmvnic_get_msglevel,
@@ -2527,6 +2584,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
 	.get_sset_count		= ibmvnic_get_sset_count,
 	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
 	.get_link_ksettings	= ibmvnic_get_link_ksettings,
+	.get_priv_flags		= ibmvnic_get_priv_flags,
+	.set_priv_flags		= ibmvnic_set_priv_flags,
 };
 
 /* Routines for managing CRQs/sCRQs */