|
@@ -3140,6 +3140,87 @@ static void cxgb_netpoll(struct net_device *dev)
|
|
|
}
|
|
|
#endif
|
|
|
|
|
|
+/* cxgb_set_tx_maxrate - ndo_set_tx_maxrate callback
+ * @dev: net device
+ * @index: Tx queue index on this port
+ * @rate: maximum rate in Mbps; 0 removes any existing limit
+ *
+ * Rate-limits Tx queue @index by binding it to a hardware scheduling
+ * class of the requested rate.  The queue is always unbound from its
+ * current class first.  Returns 0 on success or a negative errno.
+ */
+static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+	struct sched_class *e;
+	struct ch_sched_params p;
+	struct ch_sched_queue qe;
+	u32 req_rate;
+	int err = 0;
+
+	/* NOTE(review): -EOPNOTSUPP is the conventional errno to leak to
+	 * userspace; -ENOTSUPP is kernel-internal — confirm against the
+	 * driver's other ndo callbacks.
+	 */
+	if (!can_sched(dev))
+		return -ENOTSUPP;
+
+	if (index < 0 || index > pi->nqsets - 1)
+		return -EINVAL;
+
+	/* Scheduler programming needs a fully brought-up adapter */
+	if (!(adap->flags & FULL_INIT_DONE)) {
+		dev_err(adap->pdev_dev,
+			"Failed to rate limit on queue %d. Link Down?\n",
+			index);
+		return -EINVAL;
+	}
+
+	/* Range-check before converting: 1 Mbps = 1000 Kbps (the previous
+	 * "<< 10" over-converted by 2.4%), and checking @rate first avoids
+	 * u32 overflow in the multiply for absurd requests.  Exactly the
+	 * maximum supported rate is allowed, hence ">" not ">=".
+	 */
+	if (rate > SCHED_MAX_RATE_KBPS / 1000) {
+		dev_err(adap->pdev_dev,
+			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
+			rate, SCHED_MAX_RATE_KBPS / 1000);
+		return -ERANGE;
+	}
+
+	/* Convert from Mbps to Kbps */
+	req_rate = rate * 1000;
+
+	/* First unbind the queue from any existing class */
+	memset(&qe, 0, sizeof(qe));
+	qe.queue = index;
+	qe.class = SCHED_CLS_NONE;
+
+	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
+	if (err) {
+		dev_err(adap->pdev_dev,
+			"Unbinding Queue %d on port %d fail. Err: %d\n",
+			index, pi->port_id, err);
+		return err;
+	}
+
+	/* A rate of 0 means "no limit": leave the queue unbound */
+	if (!req_rate)
+		return 0;
+
+	/* Fetch any available unused or matching scheduling class */
+	memset(&p, 0, sizeof(p));
+	p.type = SCHED_CLASS_TYPE_PACKET;
+	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
+	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
+	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
+	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
+	p.u.params.channel = pi->tx_chan;
+	p.u.params.class = SCHED_CLS_NONE;
+	p.u.params.minrate = 0;
+	p.u.params.maxrate = req_rate;
+	p.u.params.weight = 0;
+	p.u.params.pktsize = dev->mtu;
+
+	e = cxgb4_sched_class_alloc(dev, &p);
+	if (!e)
+		return -ENOMEM;
+
+	/* Bind the queue to the allocated scheduling class */
+	memset(&qe, 0, sizeof(qe));
+	qe.queue = index;
+	qe.class = e->idx;
+
+	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
+	if (err)
+		dev_err(adap->pdev_dev,
+			"Queue rate limiting failed. Err: %d\n", err);
+	return err;
+}
+
|
|
|
static const struct net_device_ops cxgb4_netdev_ops = {
|
|
|
.ndo_open = cxgb_open,
|
|
|
.ndo_stop = cxgb_close,
|
|
@@ -3162,6 +3243,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
|
|
|
#ifdef CONFIG_NET_RX_BUSY_POLL
|
|
|
.ndo_busy_poll = cxgb_busy_poll,
|
|
|
#endif
|
|
|
+ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
|
|
|
};
|
|
|
|
|
|
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
|