@@ -122,14 +122,10 @@ static void devm_memremap_pages_release(void *data)
 	resource_size_t align_start, align_size;
 	unsigned long pfn;
 
+	pgmap->kill(pgmap->ref);
 	for_each_device_pfn(pfn, pgmap)
 		put_page(pfn_to_page(pfn));
 
-	if (percpu_ref_tryget_live(pgmap->ref)) {
-		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
-		percpu_ref_put(pgmap->ref);
-	}
-
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -150,7 +146,7 @@ static void devm_memremap_pages_release(void *data)
 /**
  * devm_memremap_pages - remap and provide memmap backing for the given resource
  * @dev: hosting device for @res
- * @pgmap: pointer to a struct dev_pgmap
+ * @pgmap: pointer to a struct dev_pagemap
  *
  * Notes:
  * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
@@ -159,11 +155,8 @@ static void devm_memremap_pages_release(void *data)
  * 2/ The altmap field may optionally be initialized, in which case altmap_valid
  *    must be set to true
  *
- * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
- *    time (or devm release event). The expected order of events is that ref has
- *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
- *    wait for the completion of all references being dropped and
- *    percpu_ref_exit() must occur after devm_memremap_pages_release().
+ * 3/ pgmap->ref must be 'live' on entry and will be killed at
+ *    devm_memremap_pages_release() time, or if this routine fails.
  *
  * 4/ res is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -180,6 +173,9 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	int error, nid, is_ram;
 	struct dev_pagemap *conflict_pgmap;
 
+	if (!pgmap->ref || !pgmap->kill)
+		return ERR_PTR(-EINVAL);
+
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
 		- align_start;
@@ -205,12 +201,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	if (is_ram != REGION_DISJOINT) {
 		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
 				is_ram == REGION_MIXED ? "mixed" : "ram", res);
-		return ERR_PTR(-ENXIO);
+		error = -ENXIO;
+		goto err_array;
 	}
 
-	if (!pgmap->ref)
-		return ERR_PTR(-EINVAL);
-
 	pgmap->dev = dev;
 
 	mutex_lock(&pgmap_lock);
@@ -267,7 +261,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		percpu_ref_get(pgmap->ref);
 	}
 
-	devm_add_action(dev, devm_memremap_pages_release, pgmap);
+	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
+			pgmap);
+	if (error)
+		return ERR_PTR(error);
 
 	return __va(res->start);
 
@@ -278,6 +275,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_pfn_remap:
  err_radix:
 	pgmap_radix_release(res, pgoff);
+ err_array:
+	pgmap->kill(pgmap->ref);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
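
For context, the new contract as seen from a caller's side: @pgmap->ref must point
at a live percpu_ref, and @pgmap->kill at a callback that begins its shutdown,
because devm_memremap_pages() now invokes ->kill() itself, both from the devm
release path and on its own failure. Below is a minimal sketch of one way a
driver might wire this up, assuming a completion-based drain; all foo_* names
are hypothetical and not taken from the patch:

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

struct foo_dev {
	struct percpu_ref ref;
	struct completion ref_done;	/* signaled when the last ref drops */
	struct dev_pagemap pgmap;
};

/* release callback: fires once every page reference has been put */
static void foo_ref_release(struct percpu_ref *ref)
{
	struct foo_dev *foo = container_of(ref, struct foo_dev, ref);

	complete(&foo->ref_done);
}

/* ->kill() callback: devm_memremap_pages() now decides when this runs */
static void foo_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static int foo_probe(struct device *dev, struct foo_dev *foo)
{
	void *addr;
	int rc;

	init_completion(&foo->ref_done);
	rc = percpu_ref_init(&foo->ref, foo_ref_release, 0, GFP_KERNEL);
	if (rc)
		return rc;

	/* foo->pgmap.res and foo->pgmap.type are set up elsewhere */
	foo->pgmap.ref = &foo->ref;
	foo->pgmap.kill = foo_ref_kill;

	addr = devm_memremap_pages(dev, &foo->pgmap);
	if (IS_ERR(addr)) {
		/* the failure path has already called ->kill() for us */
		wait_for_completion(&foo->ref_done);
		percpu_ref_exit(&foo->ref);
		return PTR_ERR(addr);
	}
	return 0;
}

Note that only the kill step moves into devm_memremap_pages(); waiting for the
ref to drain and calling percpu_ref_exit() remain the caller's responsibility,
after the devm release action has run.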