Merge branch 'cxgb4-collect-more-hardware-dumps-via-ethtool'

Rahul Lakkireddy says:

====================
cxgb4: collect more hardware dumps via ethtool

This series of patches collects more firmware and hardware dumps
via the ethtool --get-dump facility.

Patch 1 collects hardware logic analyzer dumps.

Patch 2 collects CIM queue configuration dump.

Patch 3 collects RSS dumps.

Patch 4 collects TID info dump.

Patch 5 collects MPS-TCAM dump.

Patch 6 collects PBT tables dump.

Patch 7 collects hardware scheduler and pace table dumps.

Patch 8 collects miscellaneous hardware information, including
path mtu, PM stats, TP clock info, congestion control, and VPD
data dumps.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller · 7 years ago · 42c8ae1137
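
For context on how these dumps reach user space: the entities added below are exposed through the kernel's generic ethtool dump interface, so a collector first selects a dump flag and then reads the blob back. The sketch that follows is not part of the patch; it is a minimal user-space illustration using the standard ETHTOOL_SET_DUMP / ETHTOOL_GET_DUMP_FLAG / ETHTOOL_GET_DUMP_DATA ioctls. The interface name and the dump-flag value (here 2, assumed to select the hardware-dump group) are illustrative assumptions; the ethtool CLI equivalent is roughly "ethtool -W <dev> <flag>" followed by "ethtool -w <dev> data <file>".

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Issue one ethtool command against a named interface via SIOCETHTOOL. */
static int ethtool_ioctl(int fd, const char *dev, void *cmd)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = cmd;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "eth0";	/* assumed name */
	struct ethtool_dump set_flag = { .cmd = ETHTOOL_SET_DUMP, .flag = 2 };
	struct ethtool_dump get_len = { .cmd = ETHTOOL_GET_DUMP_FLAG };
	struct ethtool_dump *get_data;
	int fd, ret = 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	/* Select which entity group to collect; the flag value is driver
	 * defined, and 2 is only an assumption made for this sketch.
	 */
	if (ethtool_ioctl(fd, dev, &set_flag) < 0)
		goto out;

	/* Ask the driver how large the resulting dump will be. */
	if (ethtool_ioctl(fd, dev, &get_len) < 0)
		goto out;

	get_data = calloc(1, sizeof(*get_data) + get_len.len);
	if (!get_data)
		goto out;
	get_data->cmd = ETHTOOL_GET_DUMP_DATA;
	get_data->len = get_len.len;

	/* Pull the raw debug blob and write it to stdout. */
	if (ethtool_ioctl(fd, dev, get_data) == 0) {
		fwrite(get_data->data, 1, get_data->len, stdout);
		ret = 0;
	}
	free(get_data);
out:
	close(fd);
	return ret;
}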

+ 159 - 0
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h

@@ -21,6 +21,8 @@
 #define EDC0_FLAG 3
 #define EDC1_FLAG 4
 
+#define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
+
 struct card_mem {
 	u16 size_edc0;
 	u16 size_edc1;
@@ -33,6 +35,35 @@ struct cudbg_mbox_log {
 	u32 lo[MBOX_LEN / 8];
 };
 
+struct cudbg_cim_qcfg {
+	u8 chip;
+	u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+	u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+	u16 thres[CIM_NUM_IBQ];
+	u32 obq_wr[2 * CIM_NUM_OBQ_T5];
+	u32 stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)];
+};
+
+struct cudbg_rss_vf_conf {
+	u32 rss_vf_vfl;
+	u32 rss_vf_vfh;
+};
+
+struct cudbg_pm_stats {
+	u32 tx_cnt[T6_PM_NSTATS];
+	u32 rx_cnt[T6_PM_NSTATS];
+	u64 tx_cyc[T6_PM_NSTATS];
+	u64 rx_cyc[T6_PM_NSTATS];
+};
+
+struct cudbg_hw_sched {
+	u32 kbps[NTX_SCHED];
+	u32 ipg[NTX_SCHED];
+	u32 pace_tab[NTX_SCHED];
+	u32 mode;
+	u32 map;
+};
+
 struct ireg_field {
 	u32 ireg_addr;
 	u32 ireg_data;
@@ -45,6 +76,134 @@ struct ireg_buf {
 	u32 outbuf[32];
 };
 
+struct cudbg_ulprx_la {
+	u32 data[ULPRX_LA_SIZE * 8];
+	u32 size;
+};
+
+struct cudbg_tp_la {
+	u32 size;
+	u32 mode;
+	u8 data[0];
+};
+
+struct cudbg_cim_pif_la {
+	int size;
+	u8 data[0];
+};
+
+struct cudbg_clk_info {
+	u64 retransmit_min;
+	u64 retransmit_max;
+	u64 persist_timer_min;
+	u64 persist_timer_max;
+	u64 keepalive_idle_timer;
+	u64 keepalive_interval;
+	u64 initial_srtt;
+	u64 finwait2_timer;
+	u32 dack_timer;
+	u32 res;
+	u32 cclk_ps;
+	u32 tre;
+	u32 dack_re;
+};
+
+struct cudbg_tid_info_region {
+	u32 ntids;
+	u32 nstids;
+	u32 stid_base;
+	u32 hash_base;
+
+	u32 natids;
+	u32 nftids;
+	u32 ftid_base;
+	u32 aftid_base;
+	u32 aftid_end;
+
+	u32 sftid_base;
+	u32 nsftids;
+
+	u32 uotid_base;
+	u32 nuotids;
+
+	u32 sb;
+	u32 flags;
+	u32 le_db_conf;
+	u32 ip_users;
+	u32 ipv6_users;
+
+	u32 hpftid_base;
+	u32 nhpftids;
+};
+
+#define CUDBG_TID_INFO_REV 1
+
+struct cudbg_tid_info_region_rev1 {
+	struct cudbg_ver_hdr ver_hdr;
+	struct cudbg_tid_info_region tid;
+	u32 tid_start;
+	u32 reserved[16];
+};
+
+#define CUDBG_MAX_RPLC_SIZE 128
+
+struct cudbg_mps_tcam {
+	u64 mask;
+	u32 rplc[8];
+	u32 idx;
+	u32 cls_lo;
+	u32 cls_hi;
+	u32 rplc_size;
+	u32 vniy;
+	u32 vnix;
+	u32 dip_hit;
+	u32 vlan_vld;
+	u32 repli;
+	u16 ivlan;
+	u8 addr[ETH_ALEN];
+	u8 lookup_type;
+	u8 port_num;
+	u8 reserved[2];
+};
+
+struct cudbg_vpd_data {
+	u8 sn[SERNUM_LEN + 1];
+	u8 bn[PN_LEN + 1];
+	u8 na[MACADDR_LEN + 1];
+	u8 mn[ID_LEN + 1];
+	u16 fw_major;
+	u16 fw_minor;
+	u16 fw_micro;
+	u16 fw_build;
+	u32 scfg_vers;
+	u32 vpd_vers;
+};
+
+#define CUDBG_NUM_ULPTX 11
+#define CUDBG_NUM_ULPTX_READ 512
+
+struct cudbg_ulptx_la {
+	u32 rdptr[CUDBG_NUM_ULPTX];
+	u32 wrptr[CUDBG_NUM_ULPTX];
+	u32 rddata[CUDBG_NUM_ULPTX];
+	u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
+};
+
+#define CUDBG_CHAC_PBT_ADDR 0x2800
+#define CUDBG_CHAC_PBT_LRF  0x3000
+#define CUDBG_CHAC_PBT_DATA 0x3800
+#define CUDBG_PBT_DYNAMIC_ENTRIES 8
+#define CUDBG_PBT_STATIC_ENTRIES 16
+#define CUDBG_LRF_ENTRIES 8
+#define CUDBG_PBT_DATA_ENTRIES 512
+
+struct cudbg_pbt_tables {
+	u32 pbt_dynamic[CUDBG_PBT_DYNAMIC_ENTRIES];
+	u32 pbt_static[CUDBG_PBT_STATIC_ENTRIES];
+	u32 lrf_table[CUDBG_LRF_ENTRIES];
+	u32 pbt_data[CUDBG_PBT_DATA_ENTRIES];
+};
+
 #define IREG_NUM_ELEM 4
 
 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {

+ 19 - 0
drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h

@@ -22,6 +22,7 @@
 #define CUDBG_STATUS_NO_MEM -19
 #define CUDBG_STATUS_ENTITY_NOT_FOUND -24
 #define CUDBG_SYSTEM_ERROR -29
+#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
 
 #define CUDBG_MAJOR_VERSION 1
 #define CUDBG_MINOR_VERSION 14
@@ -29,6 +30,9 @@
 enum cudbg_dbg_entity_type {
 	CUDBG_REG_DUMP = 1,
 	CUDBG_DEV_LOG = 2,
+	CUDBG_CIM_LA = 3,
+	CUDBG_CIM_MA_LA = 4,
+	CUDBG_CIM_QCFG = 5,
 	CUDBG_CIM_IBQ_TP0 = 6,
 	CUDBG_CIM_IBQ_TP1 = 7,
 	CUDBG_CIM_IBQ_ULP = 8,
@@ -43,14 +47,29 @@ enum cudbg_dbg_entity_type {
 	CUDBG_CIM_OBQ_NCSI = 17,
 	CUDBG_EDC0 = 18,
 	CUDBG_EDC1 = 19,
+	CUDBG_RSS = 22,
+	CUDBG_RSS_VF_CONF = 25,
+	CUDBG_PATH_MTU = 27,
+	CUDBG_PM_STATS = 30,
+	CUDBG_HW_SCHED = 31,
 	CUDBG_TP_INDIRECT = 36,
 	CUDBG_SGE_INDIRECT = 37,
+	CUDBG_ULPRX_LA = 41,
+	CUDBG_TP_LA = 43,
+	CUDBG_CIM_PIF_LA = 45,
+	CUDBG_CLK = 46,
 	CUDBG_CIM_OBQ_RXQ0 = 47,
 	CUDBG_CIM_OBQ_RXQ1 = 48,
 	CUDBG_PCIE_INDIRECT = 50,
 	CUDBG_PM_INDIRECT = 51,
+	CUDBG_TID_INFO = 54,
+	CUDBG_MPS_TCAM = 57,
+	CUDBG_VPD_DATA = 58,
+	CUDBG_CCTRL = 60,
 	CUDBG_MA_INDIRECT = 61,
+	CUDBG_ULPTX_LA = 62,
 	CUDBG_UP_CIM_INDIRECT = 64,
+	CUDBG_PBT_TABLE = 65,
 	CUDBG_MBOX_LOG = 66,
 	CUDBG_HMA_INDIRECT = 67,
 	CUDBG_MAX_ENTITY = 70,

+ 746 - 0
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c

@@ -129,6 +129,108 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
 	return rc;
 }
 
+int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
+			 struct cudbg_buffer *dbg_buff,
+			 struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	int size, rc;
+	u32 cfg = 0;
+
+	if (is_t6(padap->params.chip)) {
+		size = padap->params.cim_la_size / 10 + 1;
+		size *= 11 * sizeof(u32);
+	} else {
+		size = padap->params.cim_la_size / 8;
+		size *= 8 * sizeof(u32);
+	}
+
+	size += sizeof(cfg);
+	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	if (rc)
+		return rc;
+
+	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+	if (rc) {
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return rc;
+	}
+
+	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
+	rc = t4_cim_read_la(padap,
+			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
+			    NULL);
+	if (rc < 0) {
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return rc;
+	}
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
+			    struct cudbg_buffer *dbg_buff,
+			    struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	int size, rc;
+
+	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
+	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	if (rc)
+		return rc;
+
+	t4_cim_read_ma_la(padap,
+			  (u32 *)temp_buff.data,
+			  (u32 *)((char *)temp_buff.data +
+				  5 * CIM_MALA_SIZE));
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_cim_qcfg *cim_qcfg_data;
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
+	cim_qcfg_data->chip = padap->params.chip;
+	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
+			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
+	if (rc) {
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return rc;
+	}
+
+	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
+			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
+			 cim_qcfg_data->obq_wr);
+	if (rc) {
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return rc;
+	}
+
+	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
+			 cim_qcfg_data->thres);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
 			      struct cudbg_buffer *dbg_buff,
 			      struct cudbg_error *cudbg_err, int qid)
@@ -426,6 +528,115 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
 					MEM_EDC1);
 }
 
+int cudbg_collect_rss(struct cudbg_init *pdbg_init,
+		      struct cudbg_buffer *dbg_buff,
+		      struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+	if (rc)
+		return rc;
+
+	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
+	if (rc) {
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return rc;
+	}
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
+				struct cudbg_buffer *dbg_buff,
+				struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_rss_vf_conf *vfconf;
+	int vf, rc, vf_count;
+
+	vf_count = padap->params.arch.vfcount;
+	rc = cudbg_get_buff(dbg_buff,
+			    vf_count * sizeof(struct cudbg_rss_vf_conf),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
+	for (vf = 0; vf < vf_count; vf++)
+		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
+				      &vfconf[vf].rss_vf_vfh, true);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
+	if (rc)
+		return rc;
+
+	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_pm_stats *pm_stats_buff;
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
+	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
+	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_hw_sched *hw_sched_buff;
+	int i, rc = 0;
+
+	if (!padap->params.vpd.cclk)
+		return CUDBG_STATUS_CCLK_NOT_DEFINED;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
+			    &temp_buff);
+	hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
+	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
+	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
+	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
+	for (i = 0; i < NTX_SCHED; ++i)
+		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
+				&hw_sched_buff->ipg[i], true);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
 int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
 			      struct cudbg_buffer *dbg_buff,
 			      struct cudbg_error *cudbg_err)
@@ -574,6 +785,121 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
 	return rc;
 }
 
+int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_ulprx_la *ulprx_la_buff;
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
+	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
+	ulprx_la_buff->size = ULPRX_LA_SIZE;
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
+			struct cudbg_buffer *dbg_buff,
+			struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_tp_la *tp_la_buff;
+	int size, rc;
+
+	size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
+	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	if (rc)
+		return rc;
+
+	tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
+	tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
+	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
+			     struct cudbg_buffer *dbg_buff,
+			     struct cudbg_error *cudbg_err)
+{
+	struct cudbg_cim_pif_la *cim_pif_la_buff;
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	int size, rc;
+
+	size = sizeof(struct cudbg_cim_pif_la) +
+	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
+	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	if (rc)
+		return rc;
+
+	cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
+	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
+	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
+			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
+			   NULL, NULL);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_clk_info *clk_info_buff;
+	u64 tp_tick_us;
+	int rc;
+
+	if (!padap->params.vpd.cclk)
+		return CUDBG_STATUS_CCLK_NOT_DEFINED;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
+	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
+	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
+	clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
+	clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
+	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
+
+	clk_info_buff->dack_timer =
+		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
+		t4_read_reg(padap, TP_DACK_TIMER_A);
+	clk_info_buff->retransmit_min =
+		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
+	clk_info_buff->retransmit_max =
+		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
+	clk_info_buff->persist_timer_min =
+		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
+	clk_info_buff->persist_timer_max =
+		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
+	clk_info_buff->keepalive_idle_timer =
+		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
+	clk_info_buff->keepalive_interval =
+		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
+	clk_info_buff->initial_srtt =
+		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
+	clk_info_buff->finwait2_timer =
+		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
+
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
 				struct cudbg_buffer *dbg_buff,
 				struct cudbg_error *cudbg_err)
@@ -688,6 +1014,323 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
 	return rc;
 }
 
+int cudbg_collect_tid(struct cudbg_init *pdbg_init,
+		      struct cudbg_buffer *dbg_buff,
+		      struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_tid_info_region_rev1 *tid1;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_tid_info_region *tid;
+	u32 para[2], val[2];
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
+	tid = &tid1->tid;
+	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
+	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
+	tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
+			     sizeof(struct cudbg_ver_hdr);
+
+#define FW_PARAM_PFVF_A(param) \
+	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+	 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
+	 FW_PARAMS_PARAM_Y_V(0) | \
+	 FW_PARAMS_PARAM_Z_V(0))
+
+	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
+	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
+	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
+	if (rc <  0) {
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return rc;
+	}
+	tid->uotid_base = val[0];
+	tid->nuotids = val[1] - val[0] + 1;
+
+	if (is_t5(padap->params.chip)) {
+		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
+	} else if (is_t6(padap->params.chip)) {
+		tid1->tid_start =
+			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);
+
+		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
+		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
+		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
+				     para, val);
+		if (rc < 0) {
+			cudbg_err->sys_err = rc;
+			cudbg_put_buff(&temp_buff, dbg_buff);
+			return rc;
+		}
+		tid->hpftid_base = val[0];
+		tid->nhpftids = val[1] - val[0] + 1;
+	}
+
+	tid->ntids = padap->tids.ntids;
+	tid->nstids = padap->tids.nstids;
+	tid->stid_base = padap->tids.stid_base;
+	tid->hash_base = padap->tids.hash_base;
+
+	tid->natids = padap->tids.natids;
+	tid->nftids = padap->tids.nftids;
+	tid->ftid_base = padap->tids.ftid_base;
+	tid->aftid_base = padap->tids.aftid_base;
+	tid->aftid_end = padap->tids.aftid_end;
+
+	tid->sftid_base = padap->tids.sftid_base;
+	tid->nsftids = padap->tids.nsftids;
+
+	tid->flags = padap->flags;
+	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
+	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
+	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
+
+#undef FW_PARAM_PFVF_A
+
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
+{
+	*mask = x | y;
+	y = (__force u64)cpu_to_be64(y);
+	memcpy(addr, (char *)&y + 2, ETH_ALEN);
+}
+
+static void cudbg_mps_rpl_backdoor(struct adapter *padap,
+				   struct fw_ldst_mps_rplc *mps_rplc)
+{
+	if (is_t5(padap->params.chip)) {
+		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
+							  MPS_VF_RPLCT_MAP3_A));
+		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
+							  MPS_VF_RPLCT_MAP2_A));
+		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
+							  MPS_VF_RPLCT_MAP1_A));
+		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
+							  MPS_VF_RPLCT_MAP0_A));
+	} else {
+		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
+							  MPS_VF_RPLCT_MAP7_A));
+		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
+							  MPS_VF_RPLCT_MAP6_A));
+		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
+							  MPS_VF_RPLCT_MAP5_A));
+		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
+							  MPS_VF_RPLCT_MAP4_A));
+	}
+	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
+	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
+	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
+	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
+}
+
+static int cudbg_collect_tcam_index(struct adapter *padap,
+				    struct cudbg_mps_tcam *tcam, u32 idx)
+{
+	u64 tcamy, tcamx, val;
+	u32 ctl, data2;
+	int rc = 0;
+
+	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
+		/* CtlReqID   - 1: use Host Driver Requester ID
+		 * CtlCmdType - 0: Read, 1: Write
+		 * CtlTcamSel - 0: TCAM0, 1: TCAM1
+		 * CtlXYBitSel- 0: Y bit, 1: X bit
+		 */
+
+		/* Read tcamy */
+		ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+		if (idx < 256)
+			ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+		else
+			ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
+
+		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
+		tcamy = DMACH_G(val) << 32;
+		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
+		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
+		tcam->lookup_type = DATALKPTYPE_G(data2);
+
+		/* 0 - Outer header, 1 - Inner header
+		 * [71:48] bit locations are overloaded for
+		 * outer vs. inner lookup types.
+		 */
+		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
+			/* Inner header VNI */
+			tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
+			tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
+			tcam->dip_hit = data2 & DATADIPHIT_F;
+		} else {
+			tcam->vlan_vld = data2 & DATAVIDH2_F;
+			tcam->ivlan = VIDL_G(val);
+		}
+
+		tcam->port_num = DATAPORTNUM_G(data2);
+
+		/* Read tcamx. Change the control param */
+		ctl |= CTLXYBITSEL_V(1);
+		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
+		tcamx = DMACH_G(val) << 32;
+		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
+		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
+		if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
+			/* Inner header VNI mask */
+			tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
+			tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
+		}
+	} else {
+		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
+		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
+	}
+
+	/* If no entry, return */
+	if (tcamx & tcamy)
+		return rc;
+
+	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
+	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
+
+	if (is_t5(padap->params.chip))
+		tcam->repli = (tcam->cls_lo & REPLICATE_F);
+	else if (is_t6(padap->params.chip))
+		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
+
+	if (tcam->repli) {
+		struct fw_ldst_cmd ldst_cmd;
+		struct fw_ldst_mps_rplc mps_rplc;
+
+		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+		ldst_cmd.op_to_addrspace =
+			htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+			      FW_CMD_REQUEST_F | FW_CMD_READ_F |
+			      FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
+		ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+		ldst_cmd.u.mps.rplc.fid_idx =
+			htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
+			      FW_LDST_CMD_IDX_V(idx));
+
+		rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+				&ldst_cmd);
+		if (rc)
+			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
+		else
+			mps_rplc = ldst_cmd.u.mps.rplc;
+
+		tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
+		tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
+		tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
+		tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
+		if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
+			tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
+			tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
+			tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
+			tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
+		}
+	}
+	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
+	tcam->idx = idx;
+	tcam->rplc_size = padap->params.arch.mps_rplc_size;
+	return rc;
+}
+
+int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	u32 size = 0, i, n, total_size = 0;
+	struct cudbg_mps_tcam *tcam;
+	int rc;
+
+	n = padap->params.arch.mps_tcam_size;
+	size = sizeof(struct cudbg_mps_tcam) * n;
+	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	if (rc)
+		return rc;
+
+	tcam = (struct cudbg_mps_tcam *)temp_buff.data;
+	for (i = 0; i < n; i++) {
+		rc = cudbg_collect_tcam_index(padap, tcam, i);
+		if (rc) {
+			cudbg_err->sys_err = rc;
+			cudbg_put_buff(&temp_buff, dbg_buff);
+			return rc;
+		}
+		total_size += sizeof(struct cudbg_mps_tcam);
+		tcam++;
+	}
+
+	if (!total_size) {
+		rc = CUDBG_SYSTEM_ERROR;
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return rc;
+	}
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_vpd_data *vpd_data;
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
+	memcpy(vpd_data->sn, padap->params.vpd.sn, SERNUM_LEN + 1);
+	memcpy(vpd_data->bn, padap->params.vpd.pn, PN_LEN + 1);
+	memcpy(vpd_data->na, padap->params.vpd.na, MACADDR_LEN + 1);
+	memcpy(vpd_data->mn, padap->params.vpd.id, ID_LEN + 1);
+	vpd_data->scfg_vers = padap->params.scfg_vers;
+	vpd_data->vpd_vers = padap->params.vpd_vers;
+	vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(padap->params.fw_vers);
+	vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(padap->params.fw_vers);
+	vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(padap->params.fw_vers);
+	vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(padap->params.fw_vers);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
+			struct cudbg_buffer *dbg_buff,
+			struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	u32 size;
+	int rc;
+
+	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
+	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	if (rc)
+		return rc;
+
+	t4_read_cong_tbl(padap, (void *)temp_buff.data);
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
 			      struct cudbg_buffer *dbg_buff,
 			      struct cudbg_error *cudbg_err)
@@ -743,6 +1386,41 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
 	return rc;
 }
 
+int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_ulptx_la *ulptx_la_buff;
+	u32 i, j;
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
+	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
+		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
+						      ULP_TX_LA_RDPTR_0_A +
+						      0x10 * i);
+		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
+						      ULP_TX_LA_WRPTR_0_A +
+						      0x10 * i);
+		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
+						       ULP_TX_LA_RDDATA_0_A +
+						       0x10 * i);
+		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
+			ulptx_la_buff->rd_data[i][j] =
+				t4_read_reg(padap,
+					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
+	}
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 				  struct cudbg_buffer *dbg_buff,
 				  struct cudbg_error *cudbg_err)
@@ -792,6 +1470,74 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 	return rc;
 }
 
+int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
+			     struct cudbg_buffer *dbg_buff,
+			     struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_pbt_tables *pbt;
+	int i, rc;
+	u32 addr;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
+			    &temp_buff);
+	if (rc)
+		return rc;
+
+	pbt = (struct cudbg_pbt_tables *)temp_buff.data;
+	/* PBT dynamic entries */
+	addr = CUDBG_CHAC_PBT_ADDR;
+	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
+		rc = t4_cim_read(padap, addr + (i * 4), 1,
+				 &pbt->pbt_dynamic[i]);
+		if (rc) {
+			cudbg_err->sys_err = rc;
+			cudbg_put_buff(&temp_buff, dbg_buff);
+			return rc;
+		}
+	}
+
+	/* PBT static entries */
+	/* static entries start when bit 6 is set */
+	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
+	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
+		rc = t4_cim_read(padap, addr + (i * 4), 1,
+				 &pbt->pbt_static[i]);
+		if (rc) {
+			cudbg_err->sys_err = rc;
+			cudbg_put_buff(&temp_buff, dbg_buff);
+			return rc;
+		}
+	}
+
+	/* LRF entries */
+	addr = CUDBG_CHAC_PBT_LRF;
+	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
+		rc = t4_cim_read(padap, addr + (i * 4), 1,
+				 &pbt->lrf_table[i]);
+		if (rc) {
+			cudbg_err->sys_err = rc;
+			cudbg_put_buff(&temp_buff, dbg_buff);
+			return rc;
+		}
+	}
+
+	/* PBT data entries */
+	addr = CUDBG_CHAC_PBT_DATA;
+	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
+		rc = t4_cim_read(padap, addr + (i * 4), 1,
+				 &pbt->pbt_data[i]);
+		if (rc) {
+			cudbg_err->sys_err = rc;
+			cudbg_put_buff(&temp_buff, dbg_buff);
+			return rc;
+		}
+	}
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
 			   struct cudbg_buffer *dbg_buff,
 			   struct cudbg_error *cudbg_err)

+ 54 - 0
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h

@@ -24,6 +24,15 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
 int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
 			    struct cudbg_buffer *dbg_buff,
 			    struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
+			 struct cudbg_buffer *dbg_buff,
+			 struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
+			    struct cudbg_buffer *dbg_buff,
+			    struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
 int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
 			      struct cudbg_buffer *dbg_buff,
 			      struct cudbg_error *cudbg_err);
@@ -66,12 +75,39 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err);
+int cudbg_collect_rss(struct cudbg_init *pdbg_init,
+		      struct cudbg_buffer *dbg_buff,
+		      struct cudbg_error *cudbg_err);
+int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
+				struct cudbg_buffer *dbg_buff,
+				struct cudbg_error *cudbg_err);
 int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
 			      struct cudbg_buffer *dbg_buff,
 			      struct cudbg_error *cudbg_err);
+int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
+int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
+int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err);
+int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
+int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
+			struct cudbg_buffer *dbg_buff,
+			struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
+			     struct cudbg_buffer *dbg_buff,
+			     struct cudbg_error *cudbg_err);
+int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
 int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
 				struct cudbg_buffer *dbg_buff,
 				struct cudbg_error *cudbg_err);
@@ -84,12 +120,30 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
 			      struct cudbg_buffer *dbg_buff,
 			      struct cudbg_error *cudbg_err);
+int cudbg_collect_tid(struct cudbg_init *pdbg_init,
+		      struct cudbg_buffer *dbg_buff,
+		      struct cudbg_error *cudbg_err);
+int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
+int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
+int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
+			struct cudbg_buffer *dbg_buff,
+			struct cudbg_error *cudbg_err);
 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
 			      struct cudbg_buffer *dbg_buff,
 			      struct cudbg_error *cudbg_err);
+int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
+			   struct cudbg_buffer *dbg_buff,
+			   struct cudbg_error *cudbg_err);
 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 				  struct cudbg_buffer *dbg_buff,
 				  struct cudbg_error *cudbg_err);
+int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
+			     struct cudbg_buffer *dbg_buff,
+			     struct cudbg_error *cudbg_err);
 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
 			   struct cudbg_buffer *dbg_buff,
 			   struct cudbg_error *cudbg_err);

+ 6 - 0
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h

@@ -57,6 +57,12 @@ struct cudbg_entity_hdr {
 	u32 reserved[5];
 };
 
+struct cudbg_ver_hdr {
+	u32 signature;
+	u16 revision;
+	u16 size;
+};
+
 struct cudbg_buffer {
 	u32 size;
 	u32 offset;

+ 9 - 0
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

@@ -1335,6 +1335,12 @@ static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
 		adapter->params.vpd.cclk);
 }
 
+static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
+					      unsigned int ticks)
+{
+	return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
+}
+
 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
 		      u32 val);
 
@@ -1636,6 +1642,9 @@ void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
 			 int filter_index, int *enabled);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
 			 u32 addr, u32 val);
+void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
+void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
+		     unsigned int *kbps, unsigned int *ipg, bool sleep_ok);
 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
 		    int rateunit, int ratemode, int channel, int class,
 		    int minrate, int maxrate, int weight, int pktsize);

+ 82 - 0
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c

@@ -29,6 +29,9 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
 	{ CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
 	{ CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
 	{ CUDBG_REG_DUMP, cudbg_collect_reg_dump },
+	{ CUDBG_CIM_LA, cudbg_collect_cim_la },
+	{ CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
+	{ CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
 	{ CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
 	{ CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
 	{ CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
@@ -41,14 +44,29 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
 	{ CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
 	{ CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
 	{ CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
+	{ CUDBG_RSS, cudbg_collect_rss },
+	{ CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
+	{ CUDBG_PATH_MTU, cudbg_collect_path_mtu },
+	{ CUDBG_PM_STATS, cudbg_collect_pm_stats },
+	{ CUDBG_HW_SCHED, cudbg_collect_hw_sched },
 	{ CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
 	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
+	{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
+	{ CUDBG_TP_LA, cudbg_collect_tp_la },
+	{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
+	{ CUDBG_CLK, cudbg_collect_clk_info },
 	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
 	{ CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
 	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
 	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
+	{ CUDBG_TID_INFO, cudbg_collect_tid },
+	{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
+	{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
+	{ CUDBG_CCTRL, cudbg_collect_cctrl },
 	{ CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
+	{ CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
 	{ CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
+	{ CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
 	{ CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
 };
 
@@ -73,6 +91,22 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 	case CUDBG_DEV_LOG:
 		len = adap->params.devlog.size;
 		break;
+	case CUDBG_CIM_LA:
+		if (is_t6(adap->params.chip)) {
+			len = adap->params.cim_la_size / 10 + 1;
+			len *= 11 * sizeof(u32);
+		} else {
+			len = adap->params.cim_la_size / 8;
+			len *= 8 * sizeof(u32);
+		}
+		len += sizeof(u32); /* for reading CIM LA configuration */
+		break;
+	case CUDBG_CIM_MA_LA:
+		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
+		break;
+	case CUDBG_CIM_QCFG:
+		len = sizeof(struct cudbg_cim_qcfg);
+		break;
 	case CUDBG_CIM_IBQ_TP0:
 	case CUDBG_CIM_IBQ_TP1:
 	case CUDBG_CIM_IBQ_ULP:
@@ -121,6 +155,22 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 		}
 		len = cudbg_mbytes_to_bytes(len);
 		break;
+	case CUDBG_RSS:
+		len = RSS_NENTRIES * sizeof(u16);
+		break;
+	case CUDBG_RSS_VF_CONF:
+		len = adap->params.arch.vfcount *
+		      sizeof(struct cudbg_rss_vf_conf);
+		break;
+	case CUDBG_PATH_MTU:
+		len = NMTUS * sizeof(u16);
+		break;
+	case CUDBG_PM_STATS:
+		len = sizeof(struct cudbg_pm_stats);
+		break;
+	case CUDBG_HW_SCHED:
+		len = sizeof(struct cudbg_hw_sched);
+		break;
 	case CUDBG_TP_INDIRECT:
 		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
 		case CHELSIO_T5:
@@ -142,6 +192,19 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 	case CUDBG_SGE_INDIRECT:
 		len = sizeof(struct ireg_buf) * 2;
 		break;
+	case CUDBG_ULPRX_LA:
+		len = sizeof(struct cudbg_ulprx_la);
+		break;
+	case CUDBG_TP_LA:
+		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
+		break;
+	case CUDBG_CIM_PIF_LA:
+		len = sizeof(struct cudbg_cim_pif_la);
+		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
+		break;
+	case CUDBG_CLK:
+		len = sizeof(struct cudbg_clk_info);
+		break;
 	case CUDBG_PCIE_INDIRECT:
 		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
 		len = sizeof(struct ireg_buf) * n * 2;
@@ -150,6 +213,19 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
 		len = sizeof(struct ireg_buf) * n * 2;
 		break;
+	case CUDBG_TID_INFO:
+		len = sizeof(struct cudbg_tid_info_region_rev1);
+		break;
+	case CUDBG_MPS_TCAM:
+		len = sizeof(struct cudbg_mps_tcam) *
+		      adap->params.arch.mps_tcam_size;
+		break;
+	case CUDBG_VPD_DATA:
+		len = sizeof(struct cudbg_vpd_data);
+		break;
+	case CUDBG_CCTRL:
+		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
+		break;
 	case CUDBG_MA_INDIRECT:
 		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
 			n = sizeof(t6_ma_ireg_array) /
@@ -157,10 +233,16 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 			len = sizeof(struct ireg_buf) * n * 2;
 		}
 		break;
+	case CUDBG_ULPTX_LA:
+		len = sizeof(struct cudbg_ulptx_la);
+		break;
 	case CUDBG_UP_CIM_INDIRECT:
 		n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
 		len = sizeof(struct ireg_buf) * n;
 		break;
+	case CUDBG_PBT_TABLE:
+		len = sizeof(struct cudbg_pbt_tables);
+		break;
 	case CUDBG_MBOX_LOG:
 		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
 		break;

+ 57 - 0
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c

@@ -9547,6 +9547,63 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
 	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
 }
 
+/**
+ * t4_read_pace_tbl - read the pace table
+ * @adap: the adapter
+ * @pace_vals: holds the returned values
+ *
+ * Returns the values of TP's pace table in microseconds.
+ */
+void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
+{
+	unsigned int i, v;
+
+	for (i = 0; i < NTX_SCHED; i++) {
+		t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
+		v = t4_read_reg(adap, TP_PACE_TABLE_A);
+		pace_vals[i] = dack_ticks_to_usec(adap, v);
+	}
+}
+
+/**
+ * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
+ * @adap: the adapter
+ * @sched: the scheduler index
+ * @kbps: the byte rate in Kbps
+ * @ipg: the interpacket delay in tenths of nanoseconds
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Return the current configuration of a HW Tx scheduler.
+ */
+void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
+		     unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
+{
+	unsigned int v, addr, bpt, cpt;
+
+	if (kbps) {
+		addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
+		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
+		if (sched & 1)
+			v >>= 16;
+		bpt = (v >> 8) & 0xff;
+		cpt = v & 0xff;
+		if (!cpt) {
+			*kbps = 0;	/* scheduler disabled */
+		} else {
+			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
+			*kbps = (v * bpt) / 125;
+		}
+	}
+	if (ipg) {
+		addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
+		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
+		if (sched & 1)
+			v >>= 16;
+		v &= 0xffff;
+		*ipg = (10000 * v) / core_ticks_per_usec(adap);
+	}
+}
+
 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
 		    int rateunit, int ratemode, int channel, int class,
 		    int minrate, int maxrate, int weight, int pktsize)

+ 1 - 0
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h

@@ -47,6 +47,7 @@ enum {
 	TCB_SIZE       = 128,   /* TCB size */
 	NMTUS          = 16,    /* size of MTU table */
 	NCCTRL_WIN     = 32,    /* # of congestion control windows */
+	NTX_SCHED      = 8,     /* # of HW Tx scheduling queues */
 	PM_NSTATS      = 5,     /* # of PM stats */
 	T6_PM_NSTATS   = 7,     /* # of PM stats in T6 */
 	MBOX_LEN       = 64,    /* mailbox size in bytes */

+ 31 - 0
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h

@@ -1415,6 +1415,7 @@
 #define ROWINDEX_V(x) ((x) << ROWINDEX_S)
 
 #define TP_CCTRL_TABLE_A	0x7ddc
+#define TP_PACE_TABLE_A 0x7dd8
 #define TP_MTU_TABLE_A		0x7de4
 
 #define MTUINDEX_S    24
@@ -1449,6 +1450,15 @@
 
 #define TP_TM_PIO_ADDR_A 0x7e18
 #define TP_TM_PIO_DATA_A 0x7e1c
+#define TP_MOD_CONFIG_A 0x7e24
+
+#define TIMERMODE_S    8
+#define TIMERMODE_M    0xffU
+#define TIMERMODE_G(x) (((x) >> TIMERMODE_S) & TIMERMODE_M)
+
+#define TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A 0x3
+#define TP_TX_MOD_Q1_Q0_RATE_LIMIT_A 0x8
+
 #define TP_PIO_ADDR_A	0x7e40
 #define TP_PIO_DATA_A	0x7e44
 #define TP_MIB_INDEX_A	0x7e50
@@ -1629,6 +1639,10 @@
 #define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
 #define IESPI_PAR_ERROR_F    IESPI_PAR_ERROR_V(1U)
 
+#define ULP_TX_LA_RDPTR_0_A 0x8ec0
+#define ULP_TX_LA_RDDATA_0_A 0x8ec4
+#define ULP_TX_LA_WRPTR_0_A 0x8ec8
+
 #define PMRX_E_PCMD_PAR_ERROR_S    0
 #define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
 #define PMRX_E_PCMD_PAR_ERROR_F    PMRX_E_PCMD_PAR_ERROR_V(1U)
@@ -2435,6 +2449,18 @@
 #define MPS_CLS_TCAM_DATA0_A 0xf000
 #define MPS_CLS_TCAM_DATA1_A 0xf004
 
+#define CTLREQID_S    30
+#define CTLREQID_V(x) ((x) << CTLREQID_S)
+
+#define MPS_VF_RPLCT_MAP0_A 0x1111c
+#define MPS_VF_RPLCT_MAP1_A 0x11120
+#define MPS_VF_RPLCT_MAP2_A 0x11124
+#define MPS_VF_RPLCT_MAP3_A 0x11128
+#define MPS_VF_RPLCT_MAP4_A 0x11300
+#define MPS_VF_RPLCT_MAP5_A 0x11304
+#define MPS_VF_RPLCT_MAP6_A 0x11308
+#define MPS_VF_RPLCT_MAP7_A 0x1130c
+
 #define VIDL_S    16
 #define VIDL_M    0xffffU
 #define VIDL_G(x) (((x) >> VIDL_S) & VIDL_M)
@@ -2459,6 +2485,10 @@
 #define DATAVIDH1_M    0x7fU
 #define DATAVIDH1_G(x) (((x) >> DATAVIDH1_S) & DATAVIDH1_M)
 
+#define MPS_CLS_TCAM_RDATA0_REQ_ID1_A 0xf020
+#define MPS_CLS_TCAM_RDATA1_REQ_ID1_A 0xf024
+#define MPS_CLS_TCAM_RDATA2_REQ_ID1_A 0xf028
+
 #define USED_S    16
 #define USED_M    0x7ffU
 #define USED_G(x) (((x) >> USED_S) & USED_M)
@@ -2852,6 +2882,7 @@
 #define T6_LIPMISS_F    T6_LIPMISS_V(1U)
 
 #define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_ACTIVE_TABLE_START_INDEX_A 0x19c10
 #define LE_DB_SERVER_INDEX_A 0x19c18
 #define LE_DB_SRVR_START_INDEX_A 0x19c18
 #define LE_DB_ACT_CNT_IPV4_A 0x19c20

+ 4 - 1
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h

@@ -1244,9 +1244,12 @@ enum fw_params_param_pfvf {
 	FW_PARAMS_PARAM_PFVF_EQ_END	= 0x2C,
 	FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
 	FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E,
+	FW_PARAMS_PARAM_PFVF_ETHOFLD_START = 0x2F,
 	FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30,
 	FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
-	FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32,
+	FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32,
+	FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
+	FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
 	FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
 };