@@ -23,10 +23,16 @@
 #include <linux/swap.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/mmzone.h>
+#include <linux/pagemap.h>
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
+#include <linux/memremap.h>
 #include <linux/jump_label.h>
 #include <linux/mmu_notifier.h>
+#include <linux/memory_hotplug.h>
+
+#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
 
 /*
@@ -426,7 +432,15 @@ again:
 			 * This is a special swap entry, ignore migration, use
 			 * device and report anything else as error.
 			 */
-			if (is_migration_entry(entry)) {
+			if (is_device_private_entry(entry)) {
+				pfns[i] = hmm_pfn_t_from_pfn(swp_offset(entry));
+				if (is_write_device_private_entry(entry)) {
+					pfns[i] |= HMM_PFN_WRITE;
+				} else if (write_fault)
+					goto fault;
+				pfns[i] |= HMM_PFN_DEVICE_UNADDRESSABLE;
+				pfns[i] |= flag;
+			} else if (is_migration_entry(entry)) {
 				if (hmm_vma_walk->fault) {
 					pte_unmap(ptep);
 					hmm_vma_walk->last = addr;
@@ -720,3 +734,366 @@ int hmm_vma_fault(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL(hmm_vma_fault);
 #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
+
+
+#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
+struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
+				       unsigned long addr)
+{
+	struct page *page;
+
+	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+	if (!page)
+		return NULL;
+	lock_page(page);
+	return page;
+}
+EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
+
+
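+/*
+ * devmem->ref tracks outstanding references on the device pages.
+ * hmm_devmem_ref_release() is the percpu_ref release callback and signals
+ * &devmem->completion; hmm_devmem_ref_exit() and hmm_devmem_ref_kill() are
+ * registered as devm actions so the reference counter is torn down when the
+ * driver is unbound (or explicitly from hmm_devmem_remove()).
+ */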
+static void hmm_devmem_ref_release(struct percpu_ref *ref)
+{
+	struct hmm_devmem *devmem;
+
+	devmem = container_of(ref, struct hmm_devmem, ref);
+	complete(&devmem->completion);
+}
+
+static void hmm_devmem_ref_exit(void *data)
+{
+	struct percpu_ref *ref = data;
+	struct hmm_devmem *devmem;
+
+	devmem = container_of(ref, struct hmm_devmem, ref);
+	percpu_ref_exit(ref);
+	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
+}
+
+static void hmm_devmem_ref_kill(void *data)
+{
+	struct percpu_ref *ref = data;
+	struct hmm_devmem *devmem;
+
+	devmem = container_of(ref, struct hmm_devmem, ref);
+	percpu_ref_kill(ref);
+	wait_for_completion(&devmem->completion);
+	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
+}
+
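+/*
+ * hmm_devmem_fault() and hmm_devmem_free() are wired into the dev_pagemap as
+ * its page_fault/page_free callbacks; they simply forward the event to the
+ * driver's hmm_devmem_ops for the hmm_devmem that owns the page.
+ */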
+static int hmm_devmem_fault(struct vm_area_struct *vma,
+			    unsigned long addr,
+			    const struct page *page,
+			    unsigned int flags,
+			    pmd_t *pmdp)
+{
+	struct hmm_devmem *devmem = page->pgmap->data;
+
+	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
+}
+
+static void hmm_devmem_free(struct page *page, void *data)
+{
+	struct hmm_devmem *devmem = data;
+
+	devmem->ops->free(devmem, page);
+}
+
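+/*
+ * hmm_devmem_radix maps each PA_SECTION_SIZE-aligned chunk of hotplugged
+ * device memory (keyed by physical address >> PA_SECTION_SHIFT) to the
+ * hmm_devmem that owns it; insertions and deletions are serialized by
+ * hmm_devmem_lock, lookups happen under RCU.
+ */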
+static DEFINE_MUTEX(hmm_devmem_lock);
+static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);
+
+static void hmm_devmem_radix_release(struct resource *resource)
+{
+	resource_size_t key, align_start, align_size, align_end;
+
+	align_start = resource->start & ~(PA_SECTION_SIZE - 1);
+	align_size = ALIGN(resource_size(resource), PA_SECTION_SIZE);
+	align_end = align_start + align_size - 1;
+
+	mutex_lock(&hmm_devmem_lock);
+	for (key = resource->start;
+	     key <= resource->end;
+	     key += PA_SECTION_SIZE)
+		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
+	mutex_unlock(&hmm_devmem_lock);
+}
+
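+/*
+ * Devres release callback for the hmm_devmem: warn if device pages are still
+ * referenced, hot-remove the section-aligned page range that backed the
+ * resource and drop the corresponding radix tree entries.
+ */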
+static void hmm_devmem_release(struct device *dev, void *data)
+{
+	struct hmm_devmem *devmem = data;
+	struct resource *resource = devmem->resource;
+	unsigned long start_pfn, npages;
+	struct zone *zone;
+	struct page *page;
+
+	if (percpu_ref_tryget_live(&devmem->ref)) {
+		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
+		percpu_ref_put(&devmem->ref);
+	}
+
+	/* pages are dead and unused, undo the arch mapping */
+	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
+	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
+
+	page = pfn_to_page(start_pfn);
+	zone = page_zone(page);
+
+	mem_hotplug_begin();
+	__remove_pages(zone, start_pfn, npages);
+	mem_hotplug_done();
+
+	hmm_devmem_radix_release(resource);
+}
+
+static struct hmm_devmem *hmm_devmem_find(resource_size_t phys)
+{
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	return radix_tree_lookup(&hmm_devmem_radix, phys >> PA_SECTION_SHIFT);
+}
+
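+/*
+ * Reserve the section-aligned physical range in hmm_devmem_radix (failing
+ * with -EBUSY on a collision with another device), then hotplug struct pages
+ * for it: add_pages() allocates and initializes the memmap and
+ * move_pfn_range_to_zone() places the range in ZONE_DEVICE, after which each
+ * page is pointed at devmem->pagemap.
+ */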
+static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
+{
+	resource_size_t key, align_start, align_size, align_end;
+	struct device *device = devmem->device;
+	int ret, nid, is_ram;
+	unsigned long pfn;
+
+	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
+	align_size = ALIGN(devmem->resource->start +
+			   resource_size(devmem->resource),
+			   PA_SECTION_SIZE) - align_start;
+
+	is_ram = region_intersects(align_start, align_size,
+				   IORESOURCE_SYSTEM_RAM,
+				   IORES_DESC_NONE);
+	if (is_ram == REGION_MIXED) {
+		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
+			  __func__, devmem->resource);
+		return -ENXIO;
+	}
+	if (is_ram == REGION_INTERSECTS)
+		return -ENXIO;
+
+	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	devmem->pagemap.res = devmem->resource;
+	devmem->pagemap.page_fault = hmm_devmem_fault;
+	devmem->pagemap.page_free = hmm_devmem_free;
+	devmem->pagemap.dev = devmem->device;
+	devmem->pagemap.ref = &devmem->ref;
+	devmem->pagemap.data = devmem;
+
+	mutex_lock(&hmm_devmem_lock);
+	align_end = align_start + align_size - 1;
+	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
+		struct hmm_devmem *dup;
+
+		rcu_read_lock();
+		dup = hmm_devmem_find(key);
+		rcu_read_unlock();
+		if (dup) {
+			dev_err(device, "%s: collides with mapping for %s\n",
+				__func__, dev_name(dup->device));
+			mutex_unlock(&hmm_devmem_lock);
+			ret = -EBUSY;
+			goto error;
+		}
+		ret = radix_tree_insert(&hmm_devmem_radix,
+					key >> PA_SECTION_SHIFT,
+					devmem);
+		if (ret) {
+			dev_err(device, "%s: failed: %d\n", __func__, ret);
+			mutex_unlock(&hmm_devmem_lock);
+			goto error_radix;
+		}
+	}
+	mutex_unlock(&hmm_devmem_lock);
+
+	nid = dev_to_node(device);
+	if (nid < 0)
+		nid = numa_mem_id();
+
+	mem_hotplug_begin();
+	/*
+	 * For device private memory we call add_pages() as we only need to
+	 * allocate and initialize struct page for the device memory. Moreover,
+	 * the device memory is not accessible by the CPU, so we do not want
+	 * to create a linear mapping for it the way arch_add_memory() would.
+	 */
+	ret = add_pages(nid, align_start >> PAGE_SHIFT,
+			align_size >> PAGE_SHIFT, false);
+	if (ret) {
+		mem_hotplug_done();
+		goto error_add_memory;
+	}
+	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
+			       align_start >> PAGE_SHIFT,
+			       align_size >> PAGE_SHIFT);
+	mem_hotplug_done();
+
+	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		page->pgmap = &devmem->pagemap;
+	}
+	return 0;
+
+error_add_memory:
+	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+error_radix:
+	hmm_devmem_radix_release(devmem->resource);
+error:
+	return ret;
+}
+
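+/* devres match callback: identify an hmm_devmem by its backing resource. */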
+static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
+{
+	struct hmm_devmem *devmem = data;
+
+	return devmem->resource == match_data;
+}
+
+static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
+{
+	devres_release(devmem->device, &hmm_devmem_release,
+		       &hmm_devmem_match, devmem->resource);
+}
+
+/*
+ * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
+ *
+ * @ops: memory event device driver callback (see struct hmm_devmem_ops)
+ * @device: device struct to bind the resource to
+ * @size: size in bytes of the device memory to add
+ * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
+ *
+ * This function first finds an empty range of physical address big enough to
+ * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
+ * in turn allocates struct pages. It does not do anything beyond that; all
+ * events affecting the memory will go through the various callbacks provided
+ * by the hmm_devmem_ops struct.
+ *
+ * The device driver should call this function during device initialization
+ * and is then responsible for the memory management. HMM only provides
+ * helpers.
+ */
+struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+				  struct device *device,
+				  unsigned long size)
+{
+	struct hmm_devmem *devmem;
+	resource_size_t addr;
+	int ret;
+
+	static_branch_enable(&device_private_key);
+
+	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
+				   GFP_KERNEL, dev_to_node(device));
+	if (!devmem)
+		return ERR_PTR(-ENOMEM);
+
+	init_completion(&devmem->completion);
+	devmem->pfn_first = -1UL;
+	devmem->pfn_last = -1UL;
+	devmem->resource = NULL;
+	devmem->device = device;
+	devmem->ops = ops;
+
+	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
+			      0, GFP_KERNEL);
+	if (ret)
+		goto error_percpu_ref;
+
+	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+	if (ret)
+		goto error_devm_add_action;
+
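+	/*
+	 * Search the physical address space top-down for a free range: start
+	 * just below min(iomem_resource.end, 1UL << MAX_PHYSMEM_BITS) and
+	 * step down one aligned chunk at a time until region_intersects()
+	 * reports a range fully disjoint from any existing resource.
+	 */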
+	size = ALIGN(size, PA_SECTION_SIZE);
+	addr = min((unsigned long)iomem_resource.end,
+		   (1UL << MAX_PHYSMEM_BITS) - 1);
+	addr = addr - size + 1UL;
+
+	/*
+	 * FIXME add a new helper to quickly walk resource tree and find free
+	 * range
+	 *
+	 * FIXME what about ioport_resource resource ?
+	 */
+	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
+		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
+		if (ret != REGION_DISJOINT)
+			continue;
+
+		devmem->resource = devm_request_mem_region(device, addr, size,
+							   dev_name(device));
+		if (!devmem->resource) {
+			ret = -ENOMEM;
+			goto error_no_resource;
+		}
+		break;
+	}
+	if (!devmem->resource) {
+		ret = -ERANGE;
+		goto error_no_resource;
+	}
+
+	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
+	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
+	devmem->pfn_last = devmem->pfn_first +
+			   (resource_size(devmem->resource) >> PAGE_SHIFT);
+
+	ret = hmm_devmem_pages_create(devmem);
+	if (ret)
+		goto error_pages;
+
+	devres_add(device, devmem);
+
+	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
+	if (ret) {
+		hmm_devmem_remove(devmem);
+		return ERR_PTR(ret);
+	}
+
+	return devmem;
+
+error_pages:
+	devm_release_mem_region(device, devmem->resource->start,
+				resource_size(devmem->resource));
+error_no_resource:
+error_devm_add_action:
+	hmm_devmem_ref_kill(&devmem->ref);
+	hmm_devmem_ref_exit(&devmem->ref);
+error_percpu_ref:
+	devres_free(devmem);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(hmm_devmem_add);
+
+/*
+ * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
+ *
+ * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
+ *
+ * This will hot-unplug memory that was hotplugged by hmm_devmem_add() on
+ * behalf of the device driver. It will free the struct pages and remove the
+ * resource that reserved the physical address range for this device memory.
+ */
+void hmm_devmem_remove(struct hmm_devmem *devmem)
+{
+	resource_size_t start, size;
+	struct device *device;
+
+	if (!devmem)
+		return;
+
+	device = devmem->device;
+	start = devmem->resource->start;
+	size = resource_size(devmem->resource);
+
+	hmm_devmem_ref_kill(&devmem->ref);
+	hmm_devmem_ref_exit(&devmem->ref);
+	hmm_devmem_pages_remove(devmem);
+
+	devm_release_mem_region(device, start, size);
+}
+EXPORT_SYMBOL(hmm_devmem_remove);
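+
+/*
+ * Illustrative only, not part of this patch's API: a rough sketch of how a
+ * driver might pair these helpers. Names such as my_devmem_ops, my_fault,
+ * my_free, pdev and DEVMEM_SIZE are hypothetical placeholders.
+ *
+ *	static const struct hmm_devmem_ops my_devmem_ops = {
+ *		.fault = my_fault,	// CPU touched a device private page
+ *		.free = my_free,	// a device private page was freed
+ *	};
+ *
+ *	// during device initialization, e.g. in the probe routine:
+ *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, DEVMEM_SIZE);
+ *	if (IS_ERR(devmem))
+ *		return PTR_ERR(devmem);
+ *
+ *	// when the device memory is no longer needed:
+ *	hmm_devmem_remove(devmem);
+ */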
+#endif /* IS_ENABLED(CONFIG_DEVICE_PRIVATE) */