|
@@ -19,6 +19,7 @@
|
|
|
|
|
|
#include "t4_regs.h"
|
|
|
#include "cxgb4.h"
|
|
|
+#include "cxgb4_cudbg.h"
|
|
|
#include "cudbg_if.h"
|
|
|
#include "cudbg_lib_common.h"
|
|
|
#include "cudbg_entity.h"
|
|
@@ -2890,3 +2891,240 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
|
|
|
}
|
|
|
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
|
|
|
}
|
|
|
+
|
|
|
+void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
|
|
|
+				   u32 *num, u32 *size)	/* either out-param may be NULL */
|
|
|
+{	/* worst-case totals: every possible queue at maximum depth and descriptor size */
|
|
|
+	u32 tot_entries = 0, tot_size = 0;
|
|
|
+
|
|
|
+	/* NIC TXQ, RXQ, FLQ, and CTRLQ */
|
|
|
+	tot_entries += MAX_ETH_QSETS * 3;	/* one TXQ, one RXQ, one FLQ per ethernet qset */
|
|
|
+	tot_entries += MAX_CTRL_QUEUES;
|
|
|
+
|
|
|
+	tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
|
|
|
+	tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
|
|
|
+	tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
|
|
|
+	tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
|
|
|
+		    MAX_CTRL_TXQ_DESC_SIZE;
|
|
|
+
|
|
|
+	/* FW_EVTQ and INTRQ */
|
|
|
+	tot_entries += INGQ_EXTRAS;
|
|
|
+	tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
|
|
|
+
|
|
|
+	/* PTP_TXQ */
|
|
|
+	tot_entries += 1;
|
|
|
+	tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
|
|
|
+
|
|
|
+	/* ULD TXQ, RXQ, and FLQ */
|
|
|
+	tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
|
|
|
+	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;	/* RXQ and FLQ per ULD qset */
|
|
|
+
|
|
|
+	tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
|
|
|
+		    MAX_TXQ_DESC_SIZE;
|
|
|
+	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
|
|
|
+		    MAX_RXQ_DESC_SIZE;
|
|
|
+	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
|
|
|
+		    MAX_FL_DESC_SIZE;
|
|
|
+
|
|
|
+	/* ULD CIQ */
|
|
|
+	tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
|
|
|
+	tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
|
|
|
+		    MAX_RXQ_DESC_SIZE;
|
|
|
+
|
|
|
+	tot_size += sizeof(struct cudbg_ver_hdr) +	/* dump metadata headers */
|
|
|
+		    sizeof(struct cudbg_qdesc_info) +
|
|
|
+		    sizeof(struct cudbg_qdesc_entry) * tot_entries;
|
|
|
+
|
|
|
+	if (num)	/* both out-parameters are optional */
|
|
|
+		*num = tot_entries;
|
|
|
+
|
|
|
+	if (size)
|
|
|
+		*size = tot_size;
|
|
|
+}
|
|
|
+
|
|
|
+int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
|
|
|
+			struct cudbg_buffer *dbg_buff,
|
|
|
+			struct cudbg_error *cudbg_err)
|
|
|
+{	/* Collect descriptors of all SGE queues into a scratch buffer, then stream it into dbg_buff. */
|
|
|
+	u32 num_queues = 0, tot_entries = 0, size = 0;
|
|
|
+	struct adapter *padap = pdbg_init->adap;
|
|
|
+	struct cudbg_buffer temp_buff = { 0 };
|
|
|
+	struct cudbg_qdesc_entry *qdesc_entry;
|
|
|
+	struct cudbg_qdesc_info *qdesc_info;
|
|
|
+	struct cudbg_ver_hdr *ver_hdr;
|
|
|
+	struct sge *s = &padap->sge;
|
|
|
+	u32 i, j, cur_off, tot_len;
|
|
|
+	u8 *data;
|
|
|
+	int rc;
|
|
|
+
|
|
|
+	cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
|
|
|
+	size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);	/* cap the scratch buffer; collection stops once it fills */
|
|
|
+	tot_len = size;
|
|
|
+	data = kvzalloc(size, GFP_KERNEL);
|
|
|
+	if (!data)
|
|
|
+		return -ENOMEM;
|
|
|
+
|
|
|
+	ver_hdr = (struct cudbg_ver_hdr *)data;	/* layout: ver_hdr, then qdesc_info, then packed entries */
|
|
|
+	ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
|
|
|
+	ver_hdr->revision = CUDBG_QDESC_REV;
|
|
|
+	ver_hdr->size = sizeof(struct cudbg_qdesc_info);
|
|
|
+	size -= sizeof(*ver_hdr);
|
|
|
+
|
|
|
+	qdesc_info = (struct cudbg_qdesc_info *)(data +
|
|
|
+						 sizeof(*ver_hdr));
|
|
|
+	size -= sizeof(*qdesc_info);
|
|
|
+	qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;
|
|
|
+
|
|
|
+#define QDESC_GET(q, desc, type, label) do { \
|
|
|
+	if (size <= 0) { \
|
|
|
+		goto label; \
|
|
|
+	} \
|
|
|
+	if (desc) { \
|
|
|
+		cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
|
|
|
+		size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
|
|
|
+		num_queues++; \
|
|
|
+		qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
|
|
|
+	} \
|
|
|
+} while (0)	/* NOTE(review): size is u32, so "size <= 0" only matches 0 — confirm the subtraction above cannot wrap past zero */
|
|
|
+
|
|
|
+#define QDESC_GET_TXQ(q, type, label) do { \
|
|
|
+	struct sge_txq *txq = (struct sge_txq *)q; \
|
|
|
+	QDESC_GET(txq, txq->desc, type, label); \
|
|
|
+} while (0)
|
|
|
+
|
|
|
+#define QDESC_GET_RXQ(q, type, label) do { \
|
|
|
+	struct sge_rspq *rxq = (struct sge_rspq *)q; \
|
|
|
+	QDESC_GET(rxq, rxq->desc, type, label); \
|
|
|
+} while (0)
|
|
|
+
|
|
|
+#define QDESC_GET_FLQ(q, type, label) do { \
|
|
|
+	struct sge_fl *flq = (struct sge_fl *)q; \
|
|
|
+	QDESC_GET(flq, flq->desc, type, label); \
|
|
|
+} while (0)
|
|
|
+
|
|
|
+	/* NIC TXQ */
|
|
|
+	for (i = 0; i < s->ethqsets; i++)
|
|
|
+		QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);
|
|
|
+
|
|
|
+	/* NIC RXQ */
|
|
|
+	for (i = 0; i < s->ethqsets; i++)
|
|
|
+		QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);
|
|
|
+
|
|
|
+	/* NIC FLQ */
|
|
|
+	for (i = 0; i < s->ethqsets; i++)
|
|
|
+		QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);
|
|
|
+
|
|
|
+	/* NIC CTRLQ */
|
|
|
+	for (i = 0; i < padap->params.nports; i++)
|
|
|
+		QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);
|
|
|
+
|
|
|
+	/* FW_EVTQ */
|
|
|
+	QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);
|
|
|
+
|
|
|
+	/* INTRQ */
|
|
|
+	QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);
|
|
|
+
|
|
|
+	/* PTP_TXQ */
|
|
|
+	QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);
|
|
|
+
|
|
|
+	/* ULD Queues */
|
|
|
+	mutex_lock(&uld_mutex);	/* presumably guards ULD queue setup/teardown while we walk them — verify */
|
|
|
+
|
|
|
+	if (s->uld_txq_info) {
|
|
|
+		struct sge_uld_txq_info *utxq;
|
|
|
+
|
|
|
+		/* ULD TXQ */
|
|
|
+		for (j = 0; j < CXGB4_TX_MAX; j++) {
|
|
|
+			if (!s->uld_txq_info[j])
|
|
|
+				continue;
|
|
|
+
|
|
|
+			utxq = s->uld_txq_info[j];
|
|
|
+			for (i = 0; i < utxq->ntxq; i++)
|
|
|
+				QDESC_GET_TXQ(&utxq->uldtxq[i].q,
|
|
|
+					      cudbg_uld_txq_to_qtype(j),
|
|
|
+					      out_unlock);
|
|
|
+		}
|
|
|
+	}
|
|
|
+
|
|
|
+	if (s->uld_rxq_info) {
|
|
|
+		struct sge_uld_rxq_info *urxq;
|
|
|
+		u32 base;
|
|
|
+
|
|
|
+		/* ULD RXQ */
|
|
|
+		for (j = 0; j < CXGB4_ULD_MAX; j++) {
|
|
|
+			if (!s->uld_rxq_info[j])
|
|
|
+				continue;
|
|
|
+
|
|
|
+			urxq = s->uld_rxq_info[j];
|
|
|
+			for (i = 0; i < urxq->nrxq; i++)
|
|
|
+				QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
|
|
|
+					      cudbg_uld_rxq_to_qtype(j),
|
|
|
+					      out_unlock);
|
|
|
+		}
|
|
|
+
|
|
|
+		/* ULD FLQ */
|
|
|
+		for (j = 0; j < CXGB4_ULD_MAX; j++) {
|
|
|
+			if (!s->uld_rxq_info[j])
|
|
|
+				continue;
|
|
|
+
|
|
|
+			urxq = s->uld_rxq_info[j];
|
|
|
+			for (i = 0; i < urxq->nrxq; i++)
|
|
|
+				QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
|
|
|
+					      cudbg_uld_flq_to_qtype(j),
|
|
|
+					      out_unlock);
|
|
|
+		}
|
|
|
+
|
|
|
+		/* ULD CIQ */
|
|
|
+		for (j = 0; j < CXGB4_ULD_MAX; j++) {
|
|
|
+			if (!s->uld_rxq_info[j])
|
|
|
+				continue;
|
|
|
+
|
|
|
+			urxq = s->uld_rxq_info[j];
|
|
|
+			base = urxq->nrxq;	/* CIQs follow the RXQs in uldrxq[] */
|
|
|
+			for (i = 0; i < urxq->nciq; i++)
|
|
|
+				QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
|
|
|
+					      cudbg_uld_ciq_to_qtype(j),
|
|
|
+					      out_unlock);
|
|
|
+		}
|
|
|
+	}
|
|
|
+
|
|
|
+out_unlock:	/* also the early exit for QDESC_GET failures taken while uld_mutex is held */
|
|
|
+	mutex_unlock(&uld_mutex);
|
|
|
+
|
|
|
+out:	/* fall-through from out_unlock, or early exit before the ULD section; totals are valid for a partial dump */
|
|
|
+	qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
|
|
|
+	qdesc_info->num_queues = num_queues;
|
|
|
+	cur_off = 0;
|
|
|
+	while (tot_len) {	/* stream the scratch buffer out in CUDBG_CHUNK_SIZE pieces */
|
|
|
+		u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);
|
|
|
+
|
|
|
+		rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
|
|
|
+				    &temp_buff);
|
|
|
+		if (rc) {
|
|
|
+			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
|
|
|
+			goto out_free;
|
|
|
+		}
|
|
|
+
|
|
|
+		memcpy(temp_buff.data, data + cur_off, chunk_size);
|
|
|
+		tot_len -= chunk_size;
|
|
|
+		cur_off += chunk_size;
|
|
|
+		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
|
|
|
+						  dbg_buff);
|
|
|
+		if (rc) {
|
|
|
+			cudbg_put_buff(pdbg_init, &temp_buff);	/* NOTE(review): confirm write_and_release does not already release on failure */
|
|
|
+			cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
|
|
|
+			goto out_free;
|
|
|
+		}
|
|
|
+	}
|
|
|
+
|
|
|
+out_free:
|
|
|
+	if (data)	/* NOTE(review): kvfree(NULL) is a no-op; this guard is redundant */
|
|
|
+		kvfree(data);
|
|
|
+
|
|
|
+#undef QDESC_GET_FLQ
|
|
|
+#undef QDESC_GET_RXQ
|
|
|
+#undef QDESC_GET_TXQ
|
|
|
+#undef QDESC_GET
|
|
|
+
|
|
|
+	return rc;
|
|
|
+}
|