@@ -588,8 +588,8 @@ int
 rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 {
 	int rc, mem_priv;
-	struct ib_device_attr devattr;
 	struct rpcrdma_ia *ia = &xprt->rx_ia;
+	struct ib_device_attr *devattr = &ia->ri_devattr;
 
 	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
 	if (IS_ERR(ia->ri_id)) {
@@ -605,26 +605,21 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 		goto out2;
 	}
 
-	/*
-	 * Query the device to determine if the requested memory
-	 * registration strategy is supported. If it isn't, set the
-	 * strategy to a globally supported model.
-	 */
-	rc = ib_query_device(ia->ri_id->device, &devattr);
+	rc = ib_query_device(ia->ri_id->device, devattr);
 	if (rc) {
 		dprintk("RPC:       %s: ib_query_device failed %d\n",
 			__func__, rc);
 		goto out3;
 	}
 
-	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
+	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
 		ia->ri_have_dma_lkey = 1;
 		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
 	}
 
 	if (memreg == RPCRDMA_FRMR) {
 		/* Requires both frmr reg and local dma lkey */
-		if ((devattr.device_cap_flags &
+		if ((devattr->device_cap_flags &
 		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
 		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
 			dprintk("RPC:       %s: FRMR registration "
@@ -634,7 +629,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 			/* Mind the ia limit on FRMR page list depth */
 			ia->ri_max_frmr_depth = min_t(unsigned int,
 				RPCRDMA_MAX_DATA_SEGS,
-				devattr.max_fast_reg_page_list_len);
+				devattr->max_fast_reg_page_list_len);
 		}
 	}
 	if (memreg == RPCRDMA_MTHCAFMR) {
@@ -736,20 +731,13 @@ int
 rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 				struct rpcrdma_create_data_internal *cdata)
 {
-	struct ib_device_attr devattr;
+	struct ib_device_attr *devattr = &ia->ri_devattr;
 	struct ib_cq *sendcq, *recvcq;
 	int rc, err;
 
-	rc = ib_query_device(ia->ri_id->device, &devattr);
-	if (rc) {
-		dprintk("RPC:       %s: ib_query_device failed %d\n",
-			__func__, rc);
-		return rc;
-	}
-
 	/* check provider's send/recv wr limits */
-	if (cdata->max_requests > devattr.max_qp_wr)
-		cdata->max_requests = devattr.max_qp_wr;
+	if (cdata->max_requests > devattr->max_qp_wr)
+		cdata->max_requests = devattr->max_qp_wr;
 
 	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
 	ep->rep_attr.qp_context = ep;
@@ -784,8 +772,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 
 		}
 		ep->rep_attr.cap.max_send_wr *= depth;
-		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
-			cdata->max_requests = devattr.max_qp_wr / depth;
+		if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
+			cdata->max_requests = devattr->max_qp_wr / depth;
 			if (!cdata->max_requests)
 				return -EINVAL;
 			ep->rep_attr.cap.max_send_wr = cdata->max_requests *
@@ -868,10 +856,11 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 
 	/* Client offers RDMA Read but does not initiate */
 	ep->rep_remote_cma.initiator_depth = 0;
-	if (devattr.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
+	if (devattr->max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
 		ep->rep_remote_cma.responder_resources = 32;
 	else
-		ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom;
+		ep->rep_remote_cma.responder_resources =
+						devattr->max_qp_rd_atom;
 
 	ep->rep_remote_cma.retry_count = 7;
 	ep->rep_remote_cma.flow_control = 0;
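
Note on context: the new lines above dereference ia->ri_devattr, so this
excerpt assumes a companion hunk (outside this section) that adds the cached
attribute structure to struct rpcrdma_ia. A minimal sketch of that assumed
field, for illustration only:

	struct rpcrdma_ia {
		/* ... existing fields (ri_id, ri_dma_lkey,
		 * ri_max_frmr_depth, ...) ... */
		struct ib_device_attr	ri_devattr;	/* assumed new field:
							 * filled once by
							 * ib_query_device() in
							 * rpcrdma_ia_open() */
	};

Caching the device attributes when the interface adapter is opened is what
lets rpcrdma_ep_create() drop its own ib_query_device() call and error path.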