@@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */
 struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
-	u16 seqnum;
-	u32 orp;	/* 24-bit */
-	u32 fqid;	/* 24-bit */
-	u32 tag;
+	__be16 seqnum;
+	u8 __reserved[4];
+	__be32 fqid;	/* 24-bit */
+	__be32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
 } __packed;
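
The hunks below stop open-coding the 24-bit FQID handling and go through a pair of accessors instead. A plausible definition, assuming they sit alongside QM_FQID_MASK in the driver's headers (they are not part of this diff):

	#define QM_FQID_MASK	GENMASK(23, 0)
	#define qm_fqid_set(p, v)	((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
	#define qm_fqid_get(p)		(be32_to_cpu((p)->fqid) & QM_FQID_MASK)

Any structure with a __be32 fqid field, like the one above, can be handed to either macro, which is what lets the EQCR, DQRR, MR and MC paths share them.
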
@@ -183,41 +183,22 @@ struct qm_mr {
 };

 /* MC (Management Command) command */
-/* "Query FQ" */
-struct qm_mcc_queryfq {
+/* "FQ" command layout */
+struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
-	u32 fqid;	/* 24-bit */
+	__be32 fqid;	/* 24-bit */
	u8 __reserved2[56];
 } __packed;
-/* "Alter FQ State Commands " */
-struct qm_mcc_alterfq {
-	u8 _ncw_verb;
-	u8 __reserved1[3];
-	u32 fqid;	/* 24-bit */
-	u8 __reserved2;
-	u8 count;	/* number of consecutive FQID */
-	u8 __reserved3[10];
-	u32 context_b;	/* frame queue context b */
-	u8 __reserved4[40];
-} __packed;

-/* "Query CGR" */
-struct qm_mcc_querycgr {
+/* "CGR" command layout */
+struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
 };

-struct qm_mcc_querywq {
-	u8 _ncw_verb;
-	u8 __reserved;
-	/* select channel if verb != QUERYWQ_DEDICATED */
-	u16 channel_wq;	/* ignores wq (3 lsbits): _res[0-2] */
-	u8 __reserved2[60];
-} __packed;
-
 #define QM_MCC_VERB_VBIT		0x80
 #define QM_MCC_VERB_MASK		0x7f	/* mask of the verb bits */
 #define QM_MCC_VERB_INITFQ_PARKED	0x40
@@ -243,12 +224,9 @@ union qm_mc_command {
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
-	struct qm_mcc_queryfq queryfq;
-	struct qm_mcc_alterfq alterfq;
	struct qm_mcc_initcgr initcgr;
-	struct qm_mcc_querycgr querycgr;
-	struct qm_mcc_querywq querywq;
-	struct qm_mcc_queryfq_np queryfq_np;
+	struct qm_mcc_fq fq;
+	struct qm_mcc_cgr cgr;
 };

 /* MC (Management Command) result */
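
Consolidating the FQ-addressed commands into one layout means callers no longer pick a verb-specific view of the union just to set the FQID. A sketch of the resulting MC call sequence, using only functions that appear elsewhere in this patch (error handling elided):

	union qm_mc_command *mcc;
	union qm_mc_result *mcr;

	mcc = qm_mc_start(&p->p);		/* claim the command buffer */
	qm_fqid_set(&mcc->fq, fqid);		/* same layout for QUERYFQ and ALTER_* */
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr))
		return -ETIMEDOUT;		/* hardware never answered */
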
@@ -343,12 +321,12 @@ struct qm_portal {
 /* Cache-inhibited register access. */
 static inline u32 qm_in(struct qm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ci + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
 }

 static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
 {
-	__raw_writel(val, p->addr.ci + offset);
+	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
 }

 /* Cache Enabled Portal Access */
@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)

 static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ce + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
 }

 /* --- EQCR API --- */
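
The portal registers are big-endian regardless of the CPU, and __raw_readl()/__raw_writel() do no byte swapping, so the explicit be32_to_cpu()/cpu_to_be32() pair is what makes these accessors correct on little-endian ARM as well as big-endian PowerPC; on a big-endian kernel the conversions compile away. An equivalent formulation, assuming no ordering requirements beyond what __raw_* already provides, would be the byte-swapping MMIO helpers:

	static inline u32 qm_in(struct qm_portal *p, u32 offset)
	{
		return ioread32be(p->addr.ci + offset);	/* read + swap on LE */
	}
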
@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
-		pr_crit("losing uncommited EQCR entries\n");
+		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
 static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
 {
	DPAA_ASSERT(eqcr->busy);
-	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
-	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
 }

@@ -962,8 +939,6 @@ struct qman_portal {
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
-	/* needed for providing a non-NULL device to dma_map_***() */
-	struct platform_device *pdev;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal,
			      const struct qman_cgrs *cgrs)
 {
	struct qm_portal *p;
-	char buf[16];
	int ret;
	u32 isdr;

@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal,
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
-	sprintf(buf, "qportal-%d", c->channel);
-	portal->pdev = platform_device_alloc(buf, -1);
-	if (!portal->pdev)
-		goto fail_devalloc;
-	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
-		goto fail_devadd;
-	ret = platform_device_add(portal->pdev);
-	if (ret)
-		goto fail_devadd;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal,
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

-		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
-			e->verb, e->ern.rc, e->ern.fd.addr_lo);
+		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
@@ -1256,10 +1221,6 @@ fail_eqcr_empty:
 fail_affinity:
	free_irq(c->irq, portal);
 fail_irq:
-	platform_device_del(portal->pdev);
-fail_devadd:
-	platform_device_put(portal->pdev);
-fail_devalloc:
	kfree(portal->cgrs);
 fail_cgrs:
	qm_mc_finish(p);
@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm)
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

-	platform_device_del(qm->pdev);
-	platform_device_put(qm->pdev);
-
	qm->config = NULL;
 }

@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work)
		case QM_MR_VERB_FQRN:
		case QM_MR_VERB_FQRL:
			/* Lookup in the retirement table */
-			fq = fqid_to_fq(msg->fq.fqid);
+			fq = fqid_to_fq(qm_fqid_get(&msg->fq));
			if (WARN_ON(!fq))
				break;
			fq_state_change(p, fq, msg, verb);
@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work)
			break;
		case QM_MR_VERB_FQPN:
			/* Parked */
-			fq = tag_to_fq(msg->fq.contextB);
+			fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
			fq_state_change(p, fq, msg, verb);
			if (fq->cb.fqs)
				fq->cb.fqs(p, fq, msg);
@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work)
			}
		} else {
			/* It's a software ERN */
-			fq = tag_to_fq(msg->ern.tag);
+			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
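
Both the ERN path above and the FQPN path rely on the tag surviving a round trip through the hardware as a 32-bit value (context_b going out, the ERN tag coming back), so fq_to_tag()/tag_to_fq() must squeeze a frame-queue reference into 32 bits. A plausible pair of helpers, assuming the driver keeps a lookup table indexed by fq->idx on 64-bit builds where a pointer no longer fits (idx_to_fq() is a hypothetical name for that lookup):

	static inline u32 fq_to_tag(struct qman_fq *fq)
	{
	#if BITS_PER_LONG == 64
		return fq->idx;			/* table index fits in 32 bits */
	#else
		return (u32)fq;			/* the pointer itself */
	#endif
	}

	static inline struct qman_fq *tag_to_fq(u32 tag)
	{
	#if BITS_PER_LONG == 64
		return idx_to_fq(tag);		/* hypothetical table lookup */
	#else
		return (struct qman_fq *)tag;
	#endif
	}
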
@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
-			 * VDQCR: don't trust contextB as the FQ may have
+			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
-			/* SDQCR: contextB points to the FQ */
-			fq = tag_to_fq(dq->contextB);
+			/* SDQCR: context_b points to the FQ */
+			fq = tag_to_fq(be32_to_cpu(dq->context_b));
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
 #endif
-	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* And can't be set at the same time as TDTHRESH */
-		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
-	mcc->initfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
-	 * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

-		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
-		mcc->initfq.fqd.context_b = fq_to_tag(fq);
+		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
-		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
-			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+		if (!(be16_to_cpu(mcc->initfq.we_mask) &
+		      QM_INITFQ_WE_CONTEXTA)) {
+			mcc->initfq.we_mask |=
+				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
-			phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
-						 DMA_TO_DEVICE);
+			struct qman_portal *p = qman_dma_portal;
+
+			phys_fq = dma_map_single(p->config->dev, fq,
+						 sizeof(*fq), DMA_TO_DEVICE);
+			if (dma_mapping_error(p->config->dev, phys_fq)) {
+				dev_err(p->config->dev, "dma_mapping failed\n");
+				ret = -EIO;
+				goto out;
+			}
+
			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

-		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
-			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+		if (!(be16_to_cpu(mcc->initfq.we_mask) &
+		      QM_INITFQ_WE_DESTWQ)) {
+			mcc->initfq.we_mask |=
+				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
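
With the per-portal platform device gone, the stashing map above needs some device that is guaranteed to exist; qman_dma_portal supplies one. A minimal sketch of how such a global could be latched during portal bring-up, assuming the first successfully created portal is as good as any for mapping purposes:

	struct qman_portal *qman_dma_portal;

	/* in qman_create_portal(), once the portal is known good: */
	if (!qman_dma_portal)
		qman_dma_portal = portal;
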
@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
		goto out;
	}
	if (opts) {
-		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
-			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
-		if (opts->we_mask & QM_INITFQ_WE_CGID)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq)
		goto out;
	}
	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
		goto out;
	}
	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
-			msg.fq.fqid = fq->fqid;
-			msg.fq.contextB = fq_to_tag(fq);
+			qm_fqid_set(&msg.fq, fq->fqid);
+			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq)
		goto out;
	}
	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
	int ret = 0;

	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq,
	int ret = 0;

	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr,
	int ret = 0;

	mcc = qm_mc_start(&p->p);
-	mcc->querycgr.cgid = cgr->cgrid;
+	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
	if (unlikely(!eq))
		goto out;

-	eq->fqid = fq->fqid;
-	eq->tag = fq_to_tag(fq);
+	qm_fqid_set(eq, fq->fqid);
+	eq->tag = cpu_to_be32(fq_to_tag(fq));
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
@@ -2282,7 +2252,24 @@
 }

 #define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
-#define TARG_MASK(n)	(BIT(31) >> PORTAL_IDX(n))
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+	if (qman_ip_rev >= QMAN_REV30)
+		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+	else
+		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+	if (qman_ip_rev >= QMAN_REV30)
+		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+	else
+		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}

 static u8 qman_cgr_cpus[CGR_NUM];
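
The two helpers above hide the pre-/post-QMan-3.0 split: older revisions keep a one-hot per-portal bitmap in cscn_targ, newer ones take a portal index plus a write/remove flag through cscn_targ_upd_ctrl. Plausible definitions of the constants involved, assuming the usual encoding from the QMan reference manual:

	#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT	0x8000	/* set: add target; clear: remove */
	#define QM_CGR_TARG_PORTAL(n)	(0x80000000 >> (n))	/* one-hot portal bit */
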

@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
 {
	struct qm_mcr_querycgr cgr_state;
-	struct qm_mcc_initcgr local_opts = {};
	int ret;
	struct qman_portal *p;

@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
	spin_lock(&p->cgr_lock);

	if (opts) {
+		struct qm_mcc_initcgr local_opts = *opts;
+
		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;
-		if (opts)
-			local_opts = *opts;
-		if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-			local_opts.cgr.cscn_targ_upd_ctrl =
-				QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
-		else
-			/* Overwrite TARG */
-			local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
-						   TARG_MASK(p);
-		local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+				     be32_to_cpu(cgr_state.cgr.cscn_targ));
+		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);

		/* send init if flags indicate so */
-		if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr)
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
-	/* Overwrite TARG */
-	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
-	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
-	else
-		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
-					   ~(TARG_MASK(p));
+
+	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+			       be32_to_cpu(cgr_state.cgr.cscn_targ));
+
	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
	} while (wait && !dqrr);

	while (dqrr) {
-		if (dqrr->fqid == fqid && (dqrr->stat & s))
+		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid)
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
-	mcc->queryfq_np.fqid = fqid;
+	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid)

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fqid;
+	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid)
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&p->p);
-		mcc->alterfq.fqid = fqid;
+		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid)
				cpu_relax();
			}
			mcc = qm_mc_start(&p->p);
-			mcc->alterfq.fqid = fqid;
+			qm_fqid_set(&mcc->fq, fqid);
			qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
			if (!qm_mc_result_timeout(&p->p, &mcr)) {
				ret = -ETIMEDOUT;
@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid)
	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
-		mcc->alterfq.fqid = fqid;
+		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config(
 {
	return portal->config;
 }
+EXPORT_SYMBOL(qman_get_qm_portal_config);

 struct gen_pool *qm_fqalloc; /* FQID allocator */
 struct gen_pool *qm_qpalloc; /* pool-channel allocator */
@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp)
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
-		if (err)
+		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
+		else if (WARN_ON(err))
+			return err;
+
		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
-				return 0;
+				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
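
The -ERANGE special case only works if qman_query_fq_np() maps the hardware's out-of-range result code onto that errno, distinguishing "walked past the last valid FQID" from a real failure. A sketch of the expected result handling inside qman_query_fq_np(), assuming the QUERYFQ_NP response flags a bad FQID with a dedicated result code:

	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;			/* FQID out of range */
	else
		ret = -EIO;			/* anything else is an error */
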
@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid)
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
	 */
	struct qman_fq fq = {
-		.fqid = 1
+		.fqid = QM_FQID_RANGE_START
	};
	int err;

@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid)
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
-		if (err)
+		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
+		else if (WARN_ON(err))
+			return err;
+
		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
-				return 0;
-			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+				return err;
+			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);