@@ -213,24 +213,61 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);

 /* Protection domains */

+/**
+ * ib_alloc_pd - Allocates an unused protection domain.
+ * @device: The device on which to allocate the protection domain.
+ *
+ * A protection domain object provides an association between QPs, shared
+ * receive queues, address handles, memory regions, and memory windows.
+ *
+ * Every PD has a local_dma_lkey which can be used as the lkey value for local
+ * memory operations.
+ */
 struct ib_pd *ib_alloc_pd(struct ib_device *device)
 {
 	struct ib_pd *pd;
+	struct ib_device_attr devattr;
+	int rc;
+
+	rc = ib_query_device(device, &devattr);
+	if (rc)
+		return ERR_PTR(rc);

 	pd = device->alloc_pd(device, NULL, NULL);
+	if (IS_ERR(pd))
+		return pd;
+
+	pd->device = device;
+	pd->uobject = NULL;
+	pd->local_mr = NULL;
+	atomic_set(&pd->usecnt, 0);
+
+	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
+		pd->local_dma_lkey = device->local_dma_lkey;
+	else {
+		struct ib_mr *mr;
+
+		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+		if (IS_ERR(mr)) {
+			ib_dealloc_pd(pd);
+			return (struct ib_pd *)mr;
+		}

-	if (!IS_ERR(pd)) {
-		pd->device = device;
-		pd->uobject = NULL;
-		atomic_set(&pd->usecnt, 0);
+		pd->local_mr = mr;
+		pd->local_dma_lkey = pd->local_mr->lkey;
 	}
-
 	return pd;
 }
 EXPORT_SYMBOL(ib_alloc_pd);

 int ib_dealloc_pd(struct ib_pd *pd)
 {
+	if (pd->local_mr) {
+		if (ib_dereg_mr(pd->local_mr))
+			return -EBUSY;
+		pd->local_mr = NULL;
+	}
+
 	if (atomic_read(&pd->usecnt))
 		return -EBUSY;
