@@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
 		break;
 	}
 
+	if (type < OCRDMA_MAX_ASYNC_ERRORS)
+		atomic_inc(&dev->async_err_stats[type]);
+
 	if (qp_event) {
 		if (qp->ibqp.event_handler)
 			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
 	return 0;
 }
 
-static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
-				       struct ocrdma_cq *cq)
+static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				struct ocrdma_cq *cq, bool sq)
 {
-	unsigned long flags;
 	struct ocrdma_qp *qp;
-	bool buddy_cq_found = false;
-	/* Go through list of QPs in error state which are using this CQ
-	 * and invoke its callback handler to trigger CQE processing for
-	 * error/flushed CQE. It is rare to find more than few entries in
-	 * this list as most consumers stops after getting error CQE.
-	 * List is traversed only once when a matching buddy cq found for a QP.
-	 */
-	spin_lock_irqsave(&dev->flush_q_lock, flags);
-	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+	struct list_head *cur;
+	struct ocrdma_cq *bcq = NULL;
+	struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
+
+	list_for_each(cur, head) {
+		if (sq)
+			qp = list_entry(cur, struct ocrdma_qp, sq_entry);
+		else
+			qp = list_entry(cur, struct ocrdma_qp, rq_entry);
+
 		if (qp->srq)
 			continue;
 		/* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
 		 * if completion came on rq, sq's cq is buddy cq.
 		 */
 		if (qp->sq_cq == cq)
-			cq = qp->rq_cq;
+			bcq = qp->rq_cq;
 		else
-			cq = qp->sq_cq;
-		buddy_cq_found = true;
-		break;
+			bcq = qp->sq_cq;
+		return bcq;
 	}
+	return NULL;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				       struct ocrdma_cq *cq)
+{
+	unsigned long flags;
+	struct ocrdma_cq *bcq = NULL;
+
+	/* Go through list of QPs in error state which are using this CQ
+	 * and invoke its callback handler to trigger CQE processing for
+	 * error/flushed CQE. It is rare to find more than few entries in
+	 * this list as most consumers stops after getting error CQE.
+	 * List is traversed only once when a matching buddy cq found for a QP.
+	 */
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
+	/* Check if buddy CQ is present.
+	 * true - Check for SQ CQ
+	 * false - Check for RQ CQ
+	 */
+	bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
+	if (bcq == NULL)
+		bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
 	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
-	if (buddy_cq_found == false)
-		return;
-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
-		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+
+	/* if there is valid buddy cq, look for its completion handler */
+	if (bcq && bcq->ibcq.comp_handler) {
+		spin_lock_irqsave(&bcq->comp_handler_lock, flags);
+		(*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
+		spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
 	}
 }
 
@@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
 
 	} while (budget);
 
+	eq->aic_obj.eq_intr_cnt++;
 	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
 	return IRQ_HANDLED;
 }
@@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
 	attr->max_pd =
 	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+	attr->max_dpp_pds =
+	    (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
 	attr->max_qp =
 	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
 	return status;
 }
 
+
+static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	size_t pd_bitmap_size;
+	struct ocrdma_alloc_pd_range *cmd;
+	struct ocrdma_alloc_pd_range_rsp *rsp;
+
+	/* Pre allocate the DPP PDs */
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	cmd->pd_count = dev->attr.max_dpp_pds;
+	cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+	if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
+		dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+		dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+				OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+		dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+		dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+						     GFP_KERNEL);
+	}
+	kfree(cmd);
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+	if (rsp->pd_count) {
+		dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
+				OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+		dev->pd_mgr->max_normal_pd = rsp->pd_count;
+		pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+		dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
+						      GFP_KERNEL);
+	}
+
+	if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
+		/* Enable PD resource manager */
+		dev->pd_mgr->pd_prealloc_valid = true;
+	} else {
+		return -ENOMEM;
+	}
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
+{
+	struct ocrdma_dealloc_pd_range *cmd;
+
+	/* return normal PDs to firmware */
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
+	if (!cmd)
+		goto mbx_err;
+
+	if (dev->pd_mgr->max_normal_pd) {
+		cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
+		cmd->pd_count = dev->pd_mgr->max_normal_pd;
+		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	}
+
+	if (dev->pd_mgr->max_dpp_pd) {
+		kfree(cmd);
+		/* return DPP PDs to firmware */
+		cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
+					  sizeof(*cmd));
+		if (!cmd)
+			goto mbx_err;
+
+		cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
+		cmd->pd_count = dev->pd_mgr->max_dpp_pd;
+		ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	}
+mbx_err:
+	kfree(cmd);
+}
+
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
+{
+	int status;
+
+	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
+			      GFP_KERNEL);
+	if (!dev->pd_mgr) {
+		pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
+		return;
+	}
+	status = ocrdma_mbx_alloc_pd_range(dev);
+	if (status) {
+		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
+		       __func__, dev->id);
+	}
+}
+
+static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
+{
+	ocrdma_mbx_dealloc_pd_range(dev);
+	kfree(dev->pd_mgr->pd_norm_bitmap);
+	kfree(dev->pd_mgr->pd_dpp_bitmap);
+	kfree(dev->pd_mgr);
+}
+
 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
 			       int *num_pages, int *page_size)
 {
@@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 {
 	bool found;
 	unsigned long flags;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
-	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
 	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
 	if (!found)
 		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 		if (!found)
 			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
 	}
-	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	int status;
 	u32 len, hw_pages, hw_page_size;
 	dma_addr_t pa;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 max_wqe_allocated;
 	u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	int status;
 	u32 len, hw_pages, hw_page_size;
 	dma_addr_t pa = 0;
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
 
@@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 					struct ocrdma_qp *qp)
 {
-	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	dma_addr_t pa = 0;
 	int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 {
 	int status = -ENOMEM;
 	u32 flags = 0;
-	struct ocrdma_dev *dev = qp->dev;
 	struct ocrdma_pd *pd = qp->pd;
+	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	struct ocrdma_cq *cq;
 	struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 	union ib_gid sgid, zgid;
 	u32 vlan_id;
 	u8 mac_addr[6];
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
 	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
 		return -EINVAL;
-	if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-		ocrdma_init_service_level(qp->dev);
+	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+		ocrdma_init_service_level(dev);
 	cmd->params.tclass_sq_psn |=
 	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
 	cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 		cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
 	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
 	       sizeof(cmd->params.dgid));
-	status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+	status = ocrdma_query_gid(&dev->ibdev, 1,
 			ah_attr->grh.sgid_index, &sgid);
 	if (status)
 		return status;
@@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 
 	qp->sgid_idx = ah_attr->grh.sgid_index;
 	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-	ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+	status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
+	if (status)
+		return status;
 	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
 				(mac_addr[2] << 16) | (mac_addr[3] << 24);
 	/* convert them to LE format. */
@@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 				vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
 		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
 		cmd->params.rnt_rc_sl_fl |=
-			(qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+			(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
 	}
 	return 0;
 }
@@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 				struct ib_qp_attr *attrs, int attr_mask)
 {
 	int status = 0;
+	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 			return status;
 	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
 		/* set the default mac address for UD, GSI QPs */
-		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-			(qp->dev->nic_info.mac_addr[1] << 8) |
-			(qp->dev->nic_info.mac_addr[2] << 16) |
-			(qp->dev->nic_info.mac_addr[3] << 24);
-		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-			(qp->dev->nic_info.mac_addr[5] << 8);
+		cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+			(dev->nic_info.mac_addr[1] << 8) |
+			(dev->nic_info.mac_addr[2] << 16) |
+			(dev->nic_info.mac_addr[3] << 24);
+		cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+			(dev->nic_info.mac_addr[5] << 8);
 	}
 	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
 	    attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
 	}
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+		if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
 			status = -EINVAL;
 			goto pmtu_err;
 		}
@@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
 		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
 	}
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+		if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
 			status = -EINVAL;
 			goto pmtu_err;
 		}
@@ -2870,6 +3023,82 @@ done:
 	return status;
 }
 
+static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+				 int num)
+{
+	int i, status = -ENOMEM;
+	struct ocrdma_modify_eqd_req *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
+	if (!cmd)
+		return status;
+
+	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+	cmd->cmd.num_eq = num;
+	for (i = 0; i < num; i++) {
+		cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
+		cmd->cmd.set_eqd[i].phase = 0;
+		cmd->cmd.set_eqd[i].delay_multiplier =
+				(eq[i].aic_obj.prev_eqd * 65)/100;
+	}
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+			     int num)
+{
+	int num_eqs, i = 0;
+	if (num > 8) {
+		while (num) {
+			num_eqs = min(num, 8);
+			ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
+			i += num_eqs;
+			num -= num_eqs;
+		}
+	} else {
+		ocrdma_mbx_modify_eqd(dev, eq, num);
+	}
+	return 0;
+}
+
+void ocrdma_eqd_set_task(struct work_struct *work)
+{
+	struct ocrdma_dev *dev =
+		container_of(work, struct ocrdma_dev, eqd_work.work);
+	struct ocrdma_eq *eq = 0;
+	int i, num = 0, status = -EINVAL;
+	u64 eq_intr;
+
+	for (i = 0; i < dev->eq_cnt; i++) {
+		eq = &dev->eq_tbl[i];
+		if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
+			eq_intr = eq->aic_obj.eq_intr_cnt -
+				  eq->aic_obj.prev_eq_intr_cnt;
+			if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
+			    (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
+				eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
+				num++;
+			} else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
+				   (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
+				eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
+				num++;
+			}
+		}
+		eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
+	}
+
+	if (num)
+		status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+	schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+}
+
 int ocrdma_init_hw(struct ocrdma_dev *dev)
 {
 	int status;
@@ -2915,6 +3144,7 @@ qpeq_err:
 
 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
 {
+	ocrdma_free_pd_pool(dev);
 	ocrdma_mbx_delete_ah_tbl(dev);
 
 	/* cleanup the eqs */