@@ -211,7 +211,8 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 }
 
 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
-			 bool arm, bool clear_int, u16 num_popped)
+			 bool arm, bool clear_int, u16 num_popped,
+			 u32 eq_delay_mult_enc)
 {
 	u32 val = 0;
 
@@ -227,6 +228,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 	val |= 1 << DB_EQ_CLR_SHIFT;
 	val |= 1 << DB_EQ_EVNT_SHIFT;
 	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
 	iowrite32(val, adapter->db + DB_EQ_OFFSET);
 }
 
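Only the be_eq_notify() prototype changes in the two hunks above; every caller updated in the hunks below passes 0 for the new argument, except the re-arm path in be_poll(), which passes the Skyhawk-R encoding. A usage sketch with illustrative argument values follows (R2I_DLY_ENC_2 and DB_EQ_R2I_DLY_SHIFT are assumed to come from the companion be_hw.h change, which is not part of this diff):

	/* Sketch only: re-arm the EQ, acknowledge num_evts popped entries
	 * and, on Skyhawk-R, request a rearm-to-interrupt delay encoding.
	 * Non-Skyhawk callers simply pass 0 as the last argument.
	 */
	be_eq_notify(adapter, eqo->q.id, true /* arm */, false /* clear_int */,
		     num_evts, R2I_DLY_ENC_2);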
@@ -1686,61 +1688,110 @@ static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
 	aic->jiffies = now;
 }
 
-static void be_eqd_update(struct be_adapter *adapter)
+static int be_get_new_eqd(struct be_eq_obj *eqo)
 {
-	struct be_set_eqd set_eqd[MAX_EVT_QS];
-	int eqd, i, num = 0, start;
+	struct be_adapter *adapter = eqo->adapter;
+	int eqd, start;
 	struct be_aic_obj *aic;
-	struct be_eq_obj *eqo;
 	struct be_rx_obj *rxo;
 	struct be_tx_obj *txo;
-	u64 rx_pkts, tx_pkts;
+	u64 rx_pkts = 0, tx_pkts = 0;
 	ulong now;
 	u32 pps, delta;
+	int i;
 
-	for_all_evt_queues(adapter, eqo, i) {
-		aic = &adapter->aic_obj[eqo->idx];
-		if (!aic->enable) {
-			if (aic->jiffies)
-				aic->jiffies = 0;
-			eqd = aic->et_eqd;
-			goto modify_eqd;
-		}
+	aic = &adapter->aic_obj[eqo->idx];
+	if (!aic->enable) {
+		if (aic->jiffies)
+			aic->jiffies = 0;
+		eqd = aic->et_eqd;
+		return eqd;
+	}
 
-		rxo = &adapter->rx_obj[eqo->idx];
+	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
 		do {
 			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
-			rx_pkts = rxo->stats.rx_pkts;
+			rx_pkts += rxo->stats.rx_pkts;
 		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+	}
 
-		txo = &adapter->tx_obj[eqo->idx];
+	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
 		do {
 			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
-			tx_pkts = txo->stats.tx_reqs;
+			tx_pkts += txo->stats.tx_reqs;
 		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+	}
 
-		/* Skip, if wrapped around or first calculation */
-		now = jiffies;
-		if (!aic->jiffies || time_before(now, aic->jiffies) ||
-		    rx_pkts < aic->rx_pkts_prev ||
-		    tx_pkts < aic->tx_reqs_prev) {
-			be_aic_update(aic, rx_pkts, tx_pkts, now);
-			continue;
-		}
+	/* Skip, if wrapped around or first calculation */
+	now = jiffies;
+	if (!aic->jiffies || time_before(now, aic->jiffies) ||
+	    rx_pkts < aic->rx_pkts_prev ||
+	    tx_pkts < aic->tx_reqs_prev) {
+		be_aic_update(aic, rx_pkts, tx_pkts, now);
+		return aic->prev_eqd;
+	}
 
-		delta = jiffies_to_msecs(now - aic->jiffies);
-		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
-			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
-		eqd = (pps / 15000) << 2;
+	delta = jiffies_to_msecs(now - aic->jiffies);
+	if (delta == 0)
+		return aic->prev_eqd;
 
-		if (eqd < 8)
-			eqd = 0;
-		eqd = min_t(u32, eqd, aic->max_eqd);
-		eqd = max_t(u32, eqd, aic->min_eqd);
+	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+	eqd = (pps / 15000) << 2;
 
-		be_aic_update(aic, rx_pkts, tx_pkts, now);
-modify_eqd:
-		if (eqd != aic->prev_eqd) {
+	if (eqd < 8)
+		eqd = 0;
+	eqd = min_t(u32, eqd, aic->max_eqd);
+	eqd = max_t(u32, eqd, aic->min_eqd);
+
+	be_aic_update(aic, rx_pkts, tx_pkts, now);
+
+	return eqd;
+}
+
+/* For Skyhawk-R only */
+static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
+{
+	struct be_adapter *adapter = eqo->adapter;
+	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
+	ulong now = jiffies;
+	int eqd;
+	u32 mult_enc;
+
+	if (!aic->enable)
+		return 0;
+
+	if (time_before_eq(now, aic->jiffies) ||
+	    jiffies_to_msecs(now - aic->jiffies) < 1)
+		eqd = aic->prev_eqd;
+	else
+		eqd = be_get_new_eqd(eqo);
+
+	if (eqd > 100)
+		mult_enc = R2I_DLY_ENC_1;
+	else if (eqd > 60)
+		mult_enc = R2I_DLY_ENC_2;
+	else if (eqd > 20)
+		mult_enc = R2I_DLY_ENC_3;
+	else
+		mult_enc = R2I_DLY_ENC_0;
+
+	aic->prev_eqd = eqd;
+
+	return mult_enc;
+}
+
+void be_eqd_update(struct be_adapter *adapter, bool force_update)
+{
+	struct be_set_eqd set_eqd[MAX_EVT_QS];
+	struct be_aic_obj *aic;
+	struct be_eq_obj *eqo;
+	int i, num = 0, eqd;
+
+	for_all_evt_queues(adapter, eqo, i) {
+		aic = &adapter->aic_obj[eqo->idx];
+		eqd = be_get_new_eqd(eqo);
+		if (force_update || eqd != aic->prev_eqd) {
 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
 			set_eqd[num].eq_id = eqo->q.id;
 			aic->prev_eqd = eqd;
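The refactor above splits the per-EQ delay calculation (be_get_new_eqd()) from the loop that pushes it to the firmware (be_eqd_update(), which now also takes a force_update flag). For the arithmetic, a standalone sketch of the rate-to-delay mapping follows; the helper name eqd_from_pps() is illustrative and not part of the driver, and the clamping bounds stand in for aic->min_eqd/aic->max_eqd:

	/* Sketch: combined RX+TX packet rate -> adaptive EQ delay, as computed
	 * in be_get_new_eqd() above.  Example: 300000 pkts/s gives
	 * (300000 / 15000) << 2 = 80, and be_eqd_update() later converts that
	 * to a FW delay multiplier of (80 * 65) / 100 = 52.
	 */
	static u32 eqd_from_pps(u32 pps, u32 min_eqd, u32 max_eqd)
	{
		u32 eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* very low rate: request no delay */
		eqd = min_t(u32, eqd, max_eqd);
		eqd = max_t(u32, eqd, min_eqd);

		return eqd;
	}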
@@ -2248,7 +2299,7 @@ static void be_eq_clean(struct be_eq_obj *eqo)
 {
 	int num = events_get(eqo);
 
-	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
+	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
 }
 
 static void be_rx_cq_clean(struct be_rx_obj *rxo)
@@ -2609,7 +2660,7 @@ static irqreturn_t be_intx(int irq, void *dev)
 		if (num_evts)
 			eqo->spurious_intr = 0;
 	}
-	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
+	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
 
 	/* Return IRQ_HANDLED only for the the first spurious intr
 	 * after a valid intr to stop the kernel from branding
@@ -2625,7 +2676,7 @@ static irqreturn_t be_msix(int irq, void *dev)
 {
 	struct be_eq_obj *eqo = dev;
 
-	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
 	napi_schedule(&eqo->napi);
 	return IRQ_HANDLED;
 }
@@ -2874,6 +2925,7 @@ int be_poll(struct napi_struct *napi, int budget)
 	int max_work = 0, work, i, num_evts;
 	struct be_rx_obj *rxo;
 	struct be_tx_obj *txo;
+	u32 mult_enc = 0;
 
 	num_evts = events_get(eqo);
 
@@ -2899,10 +2951,18 @@ int be_poll(struct napi_struct *napi, int budget)
 
 	if (max_work < budget) {
 		napi_complete(napi);
-		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
+
+		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
+		 * delay via a delay multiplier encoding value
+		 */
+		if (skyhawk_chip(adapter))
+			mult_enc = be_get_eq_delay_mult_enc(eqo);
+
+		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
+			     mult_enc);
 	} else {
 		/* As we'll continue in polling mode, count and clear events */
-		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
+		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
 	}
 	return max_work;
 }
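The re-arm path above is the only consumer of the new encoding: on Skyhawk-R the adaptive delay is bucketed into one of the four R2I_DLY_ENC_* values and carried in the EQ doorbell itself, which is why the periodic be_eqd_update() call is skipped for Skyhawk in the be_worker() hunk further below. A standalone summary of the bands used by be_get_eq_delay_mult_enc() (the helper below is an illustration, not driver code; the encodings themselves are defined in the companion be_hw.h change):

	/* Sketch: adaptive EQ delay -> Skyhawk-R rearm-to-interrupt delay
	 * encoding.  The eqd of 80 from the earlier example selects
	 * R2I_DLY_ENC_2.
	 */
	static u32 eqd_to_r2i_dly_enc(int eqd)
	{
		if (eqd > 100)
			return R2I_DLY_ENC_1;
		if (eqd > 60)
			return R2I_DLY_ENC_2;
		if (eqd > 20)
			return R2I_DLY_ENC_3;
		return R2I_DLY_ENC_0;
	}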
@@ -3299,7 +3359,7 @@ static int be_open(struct net_device *netdev)
 	for_all_evt_queues(adapter, eqo, i) {
 		napi_enable(&eqo->napi);
 		be_enable_busy_poll(eqo);
-		be_eq_notify(adapter, eqo->q.id, true, true, 0);
+		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
 	}
 	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
@@ -4225,7 +4285,7 @@ static void be_netpoll(struct net_device *netdev)
 	int i;
 
 	for_all_evt_queues(adapter, eqo, i) {
-		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
 		napi_schedule(&eqo->napi);
 	}
 }
@@ -5227,7 +5287,9 @@ static void be_worker(struct work_struct *work)
 			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
 	}
 
-	be_eqd_update(adapter);
+	/* EQ-delay update for Skyhawk is done while notifying EQ */
+	if (!skyhawk_chip(adapter))
+		be_eqd_update(adapter, false);
 
 	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
 		be_log_sfp_info(adapter);