@@ -1943,13 +1943,13 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
 {
         struct adapter *adap = seq->private;
         int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
-        int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+        int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
         int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
         int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
         int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
         int i, r = (uintptr_t)v - 1;
-        int toe_idx = r - eth_entries;
-        int rdma_idx = toe_idx - toe_entries;
+        int iscsi_idx = r - eth_entries;
+        int rdma_idx = iscsi_idx - iscsi_entries;
         int ciq_idx = rdma_idx - rdma_entries;
         int ctrl_idx = ciq_idx - ciq_entries;
         int fq_idx = ctrl_idx - ctrl_entries;
@@ -1965,8 +1965,12 @@ do { \
                 seq_putc(seq, '\n'); \
 } while (0)
 #define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
 #define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
 #define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
 
         if (r < eth_entries) {
                 int base_qset = r * 4;
@@ -2005,12 +2009,30 @@ do { \
                 R("FL avail:", fl.avail);
                 R("FL PIDX:", fl.pidx);
                 R("FL CIDX:", fl.cidx);
-        } else if (toe_idx < toe_entries) {
-                const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
-                const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
-                int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+                RL("RxPackets:", stats.pkts);
+                RL("RxCSO:", stats.rx_cso);
+                RL("VLANxtract:", stats.vlan_ex);
+                RL("LROmerged:", stats.lro_merged);
+                RL("LROpackets:", stats.lro_pkts);
+                RL("RxDrops:", stats.rx_drops);
+                TL("TSO:", tso);
+                TL("TxCSO:", tx_cso);
+                TL("VLANins:", vlan_ins);
+                TL("TxQFull:", q.stops);
+                TL("TxQRestarts:", q.restarts);
+                TL("TxMapErr:", mapping_err);
+                RL("FLAllocErr:", fl.alloc_failed);
+                RL("FLLrgAlcErr:", fl.large_alloc_failed);
+                RL("FLStarving:", fl.starving);
+
+        } else if (iscsi_idx < iscsi_entries) {
+                const struct sge_ofld_rxq *rx =
+                        &adap->sge.ofldrxq[iscsi_idx * 4];
+                const struct sge_ofld_txq *tx =
+                        &adap->sge.ofldtxq[iscsi_idx * 4];
+                int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
 
-                S("QType:", "TOE");
+                S("QType:", "iSCSI");
                 T("TxQ ID:", q.cntxt_id);
                 T("TxQ size:", q.size);
                 T("TxQ inuse:", q.in_use);
@@ -2030,6 +2052,13 @@ do { \
                 R("FL avail:", fl.avail);
                 R("FL PIDX:", fl.pidx);
                 R("FL CIDX:", fl.cidx);
+                RL("RxPackets:", stats.pkts);
+                RL("RxImmPkts:", stats.imm);
+                RL("RxNoMem:", stats.nomem);
+                RL("FLAllocErr:", fl.alloc_failed);
+                RL("FLLrgAlcErr:", fl.large_alloc_failed);
+                RL("FLStarving:", fl.starving);
+
         } else if (rdma_idx < rdma_entries) {
                 const struct sge_ofld_rxq *rx =
                         &adap->sge.rdmarxq[rdma_idx * 4];
@@ -2052,6 +2081,13 @@ do { \
                 R("FL avail:", fl.avail);
                 R("FL PIDX:", fl.pidx);
                 R("FL CIDX:", fl.cidx);
+                RL("RxPackets:", stats.pkts);
+                RL("RxImmPkts:", stats.imm);
+                RL("RxNoMem:", stats.nomem);
+                RL("FLAllocErr:", fl.alloc_failed);
+                RL("FLLrgAlcErr:", fl.large_alloc_failed);
+                RL("FLStarving:", fl.starving);
+
         } else if (ciq_idx < ciq_entries) {
                 const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
                 int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
@@ -2067,6 +2103,9 @@ do { \
                 S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
                 S3("u", "Intr pktcnt:",
                    adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+                RL("RxAN:", stats.an);
+                RL("RxNoMem:", stats.nomem);
+
         } else if (ctrl_idx < ctrl_entries) {
                 const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
                 int n = min(4, adap->params.nports - 4 * ctrl_idx);
@@ -2077,6 +2116,8 @@ do { \
                 T("TxQ inuse:", q.in_use);
                 T("TxQ CIDX:", q.cidx);
                 T("TxQ PIDX:", q.pidx);
+                TL("TxQFull:", q.stops);
+                TL("TxQRestarts:", q.restarts);
         } else if (fq_idx == 0) {
                 const struct sge_rspq *evtq = &adap->sge.fw_evtq;
 
@@ -2092,8 +2133,12 @@ do { \
                            adap->sge.counter_val[evtq->pktcnt_idx]);
         }
 #undef R
+#undef RL
 #undef T
+#undef TL
 #undef S
+#undef R3
+#undef T3
 #undef S3
         return 0;
 }
@@ -2212,6 +2257,73 @@ static const struct file_operations mem_debugfs_fops = {
         .llseek = default_llseek,
 };
 
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+        struct adapter *adap = seq->private;
+        const struct tid_info *t = &adap->tids;
+        enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+        if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+                unsigned int sb;
+
+                if (chip <= CHELSIO_T5)
+                        sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+                else
+                        sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+                if (sb) {
+                        seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+                                   adap->tids.hash_base,
+                                   t->ntids - 1);
+                        seq_printf(seq, ", in use: %u/%u\n",
+                                   atomic_read(&t->tids_in_use),
+                                   atomic_read(&t->hash_tids_in_use));
+                } else if (adap->flags & FW_OFLD_CONN) {
+                        seq_printf(seq, "TID range: %u..%u/%u..%u",
+                                   t->aftid_base,
+                                   t->aftid_end,
+                                   adap->tids.hash_base,
+                                   t->ntids - 1);
+                        seq_printf(seq, ", in use: %u/%u\n",
+                                   atomic_read(&t->tids_in_use),
+                                   atomic_read(&t->hash_tids_in_use));
+                } else {
+                        seq_printf(seq, "TID range: %u..%u",
+                                   adap->tids.hash_base,
+                                   t->ntids - 1);
+                        seq_printf(seq, ", in use: %u\n",
+                                   atomic_read(&t->hash_tids_in_use));
+                }
+        } else if (t->ntids) {
+                seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+                seq_printf(seq, ", in use: %u\n",
+                           atomic_read(&t->tids_in_use));
+        }
+
+        if (t->nstids)
+                seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+                           (!t->stid_base &&
+                           (chip <= CHELSIO_T5)) ?
+                           t->stid_base + 1 : t->stid_base,
+                           t->stid_base + t->nstids - 1, t->stids_in_use);
+        if (t->natids)
+                seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+                           t->natids - 1, t->atids_in_use);
+        seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+                   t->ftid_base + t->nftids - 1);
+        if (t->nsftids)
+                seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+                           t->sftid_base, t->sftid_base + t->nsftids - 2,
+                           t->sftids_in_use);
+        if (t->ntids)
+                seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+                           t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+                           t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+        return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+
 static void add_debugfs_mem(struct adapter *adap, const char *name,
                             unsigned int idx, unsigned int size_mb)
 {
@@ -2625,6 +2737,7 @@ int t4_setup_debugfs(struct adapter *adap)
 #if IS_ENABLED(CONFIG_IPV6)
                 { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
 #endif
+                { "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
                 { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
                 { "meminfo", &meminfo_fops, S_IRUSR, 0 },
         };
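
For reference, the T3()/TL() and R3()/RL() macros added in the first hunk only parameterize the printf format specifier handed to the driver's existing S3() row printer, so that unsigned long counters such as stats.pkts or fl.alloc_failed print cleanly alongside the unsigned int fields covered by T()/R(). The sketch below is a minimal, self-contained illustration of that pattern; it uses plain stdio instead of seq_file, and the S3() body, column widths, and the demo_txq/demo_rxq structures are assumptions for illustration, since the full S3() definition is not visible in the hunk above.

/* Standalone sketch of the per-queue-set row-printing macro pattern. */
#include <stdio.h>

struct demo_txq { unsigned int cidx; unsigned long tso; };   /* hypothetical */
struct demo_rxq { unsigned int pidx; unsigned long pkts; };  /* hypothetical */

int main(void)
{
        struct demo_txq tx[] = { { 3, 100UL }, { 7, 200UL } };
        struct demo_rxq rx[] = { { 5, 900UL }, { 9, 800UL } };
        int i, n = 2;   /* the driver prints up to 4 queue sets per row */

/* One labelled row, one column per queue set (layout assumed). */
#define S3(fmt_spec, s, v) \
do { \
        printf("%-12s", (s)); \
        for (i = 0; i < n; ++i) \
                printf(" %16" fmt_spec, v); \
        printf("\n"); \
} while (0)
#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v) /* TX-side field */
#define T(s, v)  S3("u", s, tx[i].v)                /* unsigned int */
#define TL(s, v) T3("lu", s, v)                     /* unsigned long */
#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v) /* RX-side field */
#define R(s, v)  S3("u", s, rx[i].v)
#define RL(s, v) R3("lu", s, v)

        T("TxQ CIDX:", cidx);
        TL("TSO:", tso);
        R("RxQ PIDX:", pidx);
        RL("RxPackets:", pkts);

#undef RL
#undef R
#undef R3
#undef TL
#undef T
#undef T3
#undef S3
        return 0;
}

Because the macros bind the field through tx[i]. or rx[i]. and the loop index themselves, each stat added in the patch (RL("RxPackets:", stats.pkts), TL("TxQFull:", q.stops), and so on) stays a one-line change in sge_qinfo_show().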