@@ -1612,18 +1612,16 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 	dev->host_mem_descs = NULL;
 }
 
-static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+		u32 chunk_size)
 {
 	struct nvme_host_mem_buf_desc *descs;
-	u32 chunk_size, max_entries, len;
+	u32 max_entries, len;
 	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
 	u64 size = 0, tmp;
 
-	/* start big and work our way down */
-	chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
-retry:
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
 	max_entries = tmp;
@@ -1650,15 +1648,9 @@ retry:
 		i++;
 	}
 
-	if (!size || (min && size < min)) {
-		dev_warn(dev->ctrl.device,
-			"failed to allocate host memory buffer.\n");
+	if (!size)
 		goto out_free_bufs;
-	}
 
-	dev_info(dev->ctrl.device,
-		"allocated %lld MiB host memory buffer.\n",
-		size >> ilog2(SZ_1M));
 	dev->nr_host_mem_descs = i;
 	dev->host_mem_size = size;
 	dev->host_mem_descs = descs;
@@ -1679,15 +1671,28 @@ out_free_descs:
 	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
 			descs_dma);
 out:
-	/* try a smaller chunk size if we failed early */
-	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
-		chunk_size /= 2;
-		goto retry;
-	}
 	dev->host_mem_descs = NULL;
 	return -ENOMEM;
 }
 
+static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+{
+	u32 chunk_size;
+
+	/* start big and work our way down */
+	for (chunk_size = min_t(u64, preferred, PAGE_SIZE << MAX_ORDER);
+	     chunk_size >= PAGE_SIZE * 2;
+	     chunk_size /= 2) {
+		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
+			if (!min || dev->host_mem_size >= min)
+				return 0;
+			nvme_free_host_mem(dev);
+		}
+	}
+
+	return -ENOMEM;
+}
+
 static void nvme_setup_host_mem(struct nvme_dev *dev)
 {
 	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
@@ -1715,8 +1720,15 @@ static void nvme_setup_host_mem(struct nvme_dev *dev)
 	}
 
 	if (!dev->host_mem_descs) {
-		if (nvme_alloc_host_mem(dev, min, preferred))
+		if (nvme_alloc_host_mem(dev, min, preferred)) {
+			dev_warn(dev->ctrl.device,
+				 "failed to allocate host memory buffer.\n");
 			return;
+		}
+
+		dev_info(dev->ctrl.device,
+			 "allocated %lld MiB host memory buffer.\n",
+			 dev->host_mem_size >> ilog2(SZ_1M));
 	}
 
 	if (nvme_set_host_mem(dev, enable_bits))
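
The patch splits the old goto-based retry into two functions: __nvme_alloc_host_mem() makes a single allocation pass at a fixed chunk size, while the new nvme_alloc_host_mem() owns the fallback policy, starting at the largest chunk size and halving it until the buffer satisfies the controller's minimum or chunks would drop below two pages. The warn/info messages move out to nvme_setup_host_mem(), the one caller that reports the final outcome. Below is a minimal, self-contained userspace sketch of that fallback pattern, not kernel code; try_alloc_buffer(), MAX_CHUNK, and the simulated 64 KiB failure threshold are illustrative stand-ins.

/*
 * Minimal userspace sketch of the fallback pattern the patch
 * introduces: try the largest chunk size first, halve it on each
 * failure, give up once chunks would drop below two pages.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define MAX_CHUNK (PAGE_SIZE << 11)	/* stand-in for PAGE_SIZE << MAX_ORDER */

/* Pretend allocator: fail for chunks above 64 KiB to force the fallback. */
static int try_alloc_buffer(uint64_t preferred, uint64_t chunk_size,
			    uint64_t *allocated)
{
	if (chunk_size > 65536)
		return -1;		/* simulate higher-order allocation failure */
	*allocated = preferred;		/* pretend the whole request succeeded */
	return 0;
}

/* Mirrors the structure of the new nvme_alloc_host_mem(). */
static int alloc_host_mem(uint64_t min, uint64_t preferred)
{
	uint64_t chunk_size, allocated = 0;

	/* start big and work our way down */
	for (chunk_size = preferred < MAX_CHUNK ? preferred : MAX_CHUNK;
	     chunk_size >= PAGE_SIZE * 2;
	     chunk_size /= 2) {
		if (!try_alloc_buffer(preferred, chunk_size, &allocated)) {
			if (!min || allocated >= min)
				return 0;
			/* buffer too small to be useful, as in the kernel
			 * (which calls nvme_free_host_mem() here): retry
			 * with smaller chunks */
		}
	}
	return -1;
}

int main(void)
{
	/* e.g. controller prefers 32 MiB but can work with 1 MiB */
	if (!alloc_host_mem(1ULL << 20, 32ULL << 20))
		puts("allocated host memory buffer");
	else
		puts("failed to allocate host memory buffer");
	return 0;
}

Separating the single-pass allocator from the fallback policy also keeps the allocator silent on intermediate failures; only the final result of the whole loop is logged, which is exactly what the nvme_setup_host_mem() hunk arranges.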