@@ -302,8 +302,8 @@ static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
 	fmr_param.pool_size = target->scsi_host->can_queue;
 	fmr_param.dirty_watermark = fmr_param.pool_size / 4;
 	fmr_param.cache = 1;
-	fmr_param.max_pages_per_fmr = dev->max_pages_per_fmr;
-	fmr_param.page_shift = ilog2(dev->fmr_page_size);
+	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
+	fmr_param.page_shift = ilog2(dev->mr_page_size);
 	fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
 			    IB_ACCESS_REMOTE_WRITE |
 			    IB_ACCESS_REMOTE_READ);
@@ -657,7 +657,7 @@ static int srp_alloc_req_data(struct srp_target_port *target)
 		req = &target->req_ring[i];
 		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
 					GFP_KERNEL);
-		req->map_page = kmalloc(srp_dev->max_pages_per_fmr *
+		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
 					sizeof(void *), GFP_KERNEL);
 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
 		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
@@ -810,7 +810,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		return;
 
 	pfmr = req->fmr_list;
-	while (req->nfmr--)
+	while (req->nmdesc--)
 		ib_fmr_pool_unmap(*pfmr++);
 
 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -979,9 +979,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 		return PTR_ERR(fmr);
 
 	*state->next_fmr++ = fmr;
-	state->nfmr++;
+	state->nmdesc++;
 
-	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
+	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
 
 	return 0;
 }
@@ -995,14 +995,14 @@ static int srp_finish_mapping(struct srp_map_state *state,
 		return 0;
 
 	if (state->npages == 1 && !register_always)
-		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
+		srp_map_desc(state, state->base_dma_addr, state->dma_len,
 			     target->rkey);
 	else
 		ret = srp_map_finish_fmr(state, target);
 
 	if (ret == 0) {
 		state->npages = 0;
-		state->fmr_len = 0;
+		state->dma_len = 0;
 	}
 
 	return ret;
@@ -1047,7 +1047,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	 * that were never quite defined, but went away when the initiator
 	 * avoided using FMR on such page fragments.
 	 */
-	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
+	if (dma_addr & ~dev->mr_page_mask || dma_len > dev->mr_max_size) {
 		ret = srp_finish_mapping(state, target);
 		if (ret)
 			return ret;
@@ -1066,7 +1066,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	srp_map_update_start(state, sg, sg_index, dma_addr);
 
 	while (dma_len) {
-		if (state->npages == dev->max_pages_per_fmr) {
+		if (state->npages == dev->max_pages_per_mr) {
 			ret = srp_finish_mapping(state, target);
 			if (ret)
 				return ret;
@@ -1074,12 +1074,12 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 			srp_map_update_start(state, sg, sg_index, dma_addr);
 		}
 
-		len = min_t(unsigned int, dma_len, dev->fmr_page_size);
+		len = min_t(unsigned int, dma_len, dev->mr_page_size);
 
 		if (!state->npages)
 			state->base_dma_addr = dma_addr;
 		state->pages[state->npages++] = dma_addr;
-		state->fmr_len += len;
+		state->dma_len += len;
 		dma_addr += len;
 		dma_len -= len;
 	}
@@ -1089,7 +1089,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	 * boundries.
 	 */
 	ret = 0;
-	if (len != dev->fmr_page_size) {
+	if (len != dev->mr_page_size) {
 		ret = srp_finish_mapping(state, target);
 		if (!ret)
 			srp_map_update_start(state, NULL, 0, 0);
@@ -1136,7 +1136,7 @@ backtrack:
 	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_finish_mapping(state, target))
 		goto backtrack;
 
-	req->nfmr = state->nfmr;
+	req->nmdesc = state->nmdesc;
 }
 
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
@@ -1189,7 +1189,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 
-		req->nfmr = 0;
+		req->nmdesc = 0;
 		goto map_complete;
 	}
 
@@ -1637,7 +1637,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 		/*
 		 * If we ran out of memory descriptors (-ENOMEM) because an
 		 * application is queuing many requests with more than
-		 * max_pages_per_fmr sg-list elements, tell the SCSI mid-layer
+		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
 		 * to reduce queue depth temporarily.
 		 */
 		scmnd->result = len == -ENOMEM ?
@@ -2878,8 +2878,8 @@ static void srp_add_one(struct ib_device *device)
 	struct srp_device *srp_dev;
 	struct ib_device_attr *dev_attr;
 	struct srp_host *host;
-	int fmr_page_shift, s, e, p;
-	u64 max_pages_per_fmr;
+	int mr_page_shift, s, e, p;
+	u64 max_pages_per_mr;
 
 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
 	if (!dev_attr)
@@ -2902,18 +2902,18 @@ static void srp_add_one(struct ib_device *device)
 	 * minimum of 4096 bytes. We're unlikely to build large sglists
 	 * out of smaller entries.
 	 */
-	fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
-	srp_dev->fmr_page_size = 1 << fmr_page_shift;
-	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
-	max_pages_per_fmr = dev_attr->max_mr_size;
-	do_div(max_pages_per_fmr, srp_dev->fmr_page_size);
-	srp_dev->max_pages_per_fmr = min_t(u64, SRP_FMR_SIZE,
-					   max_pages_per_fmr);
-	srp_dev->fmr_max_size = srp_dev->fmr_page_size *
-				srp_dev->max_pages_per_fmr;
-	pr_debug("%s: fmr_page_shift = %d, dev_attr->max_mr_size = %#llx, max_pages_per_fmr = %d, fmr_max_size = %#x\n",
-		 device->name, fmr_page_shift, dev_attr->max_mr_size,
-		 srp_dev->max_pages_per_fmr, srp_dev->fmr_max_size);
+	mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
+	srp_dev->mr_page_size = 1 << mr_page_shift;
+	srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
+	max_pages_per_mr = dev_attr->max_mr_size;
+	do_div(max_pages_per_mr, srp_dev->mr_page_size);
+	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
+					  max_pages_per_mr);
+	srp_dev->mr_max_size = srp_dev->mr_page_size *
+			       srp_dev->max_pages_per_mr;
+	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, max_pages_per_mr = %d, mr_max_size = %#x\n",
+		 device->name, mr_page_shift, dev_attr->max_mr_size,
+		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
 
 	INIT_LIST_HEAD(&srp_dev->dev_list);
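
For reviewers who want to sanity-check the arithmetic in the srp_add_one() hunk above, here is a minimal user-space sketch of the same page-size/MR-limit calculation. Only the formulas mirror the patched code; SRP_MAX_PAGES_PER_MR's value and the sample device attributes below are illustrative assumptions, not taken from the SRP headers.

```c
/* Stand-alone sketch of the srp_add_one() arithmetic in this patch.
 * SRP_MAX_PAGES_PER_MR and the sample HCA attributes are illustrative
 * placeholders, not the values used by the driver.
 */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define SRP_MAX_PAGES_PER_MR	512	/* assumed driver-side cap */

int main(void)
{
	/* Sample HCA attributes (what a device query might report). */
	uint64_t page_size_cap = 0x1000;	/* smallest supported MR page: 4 KiB */
	uint64_t max_mr_size   = 1ULL << 31;	/* 2 GiB per memory region */

	/* Use at least a 4 KiB page: max(12, ffs(page_size_cap) - 1). */
	int mr_page_shift = ffs((int)page_size_cap) - 1;
	if (mr_page_shift < 12)
		mr_page_shift = 12;

	uint64_t mr_page_size = 1ULL << mr_page_shift;
	uint64_t mr_page_mask = ~(mr_page_size - 1);

	/* Pages that fit into one MR, clamped to the driver's own limit. */
	uint64_t max_pages_per_mr = max_mr_size / mr_page_size;
	if (max_pages_per_mr > SRP_MAX_PAGES_PER_MR)
		max_pages_per_mr = SRP_MAX_PAGES_PER_MR;

	uint64_t mr_max_size = mr_page_size * max_pages_per_mr;

	printf("mr_page_shift = %d, mr_page_mask = %#llx, "
	       "max_pages_per_mr = %llu, mr_max_size = %#llx\n",
	       mr_page_shift, (unsigned long long)mr_page_mask,
	       (unsigned long long)max_pages_per_mr,
	       (unsigned long long)mr_max_size);
	return 0;
}
```

With these sample attributes the sketch prints mr_page_shift = 12, mr_page_mask = 0xfffffffffffff000, max_pages_per_mr = 512 and mr_max_size = 0x200000, i.e. a single memory registration would cover at most 2 MiB of 4 KiB pages under the assumed cap.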