@@ -28,6 +28,7 @@
 #include <linux/pfn_t.h>
 #include <linux/slab.h>
 #include <linux/pmem.h>
+#include <linux/dax.h>
 #include <linux/nd.h>
 #include "pmem.h"
 #include "pfn.h"
@@ -199,13 +200,13 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 }
 
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
-__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
-		void **kaddr, pfn_t *pfn, long size)
+__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
+		long nr_pages, void **kaddr, pfn_t *pfn)
 {
-	struct pmem_device *pmem = bdev->bd_queue->queuedata;
-	resource_size_t offset = sector * 512 + pmem->data_offset;
+	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
 
-	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
+					PFN_PHYS(nr_pages))))
 		return -EIO;
 	*kaddr = pmem->virt_addr + offset;
 	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
@@ -215,26 +216,51 @@ __weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
-		return size;
-	return pmem->size - pmem->pfn_pad - offset;
+		return nr_pages;
+	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
+}
+
+static long pmem_blk_direct_access(struct block_device *bdev, sector_t sector,
+		void **kaddr, pfn_t *pfn, long size)
+{
+	struct pmem_device *pmem = bdev->bd_queue->queuedata;
+
+	return __pmem_direct_access(pmem, PHYS_PFN(sector * 512),
+			PHYS_PFN(size), kaddr, pfn);
 }
 
 static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
-	.direct_access =	pmem_direct_access,
+	.direct_access =	pmem_blk_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
 };
 
+static long pmem_dax_direct_access(struct dax_device *dax_dev,
+		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+{
+	struct pmem_device *pmem = dax_get_private(dax_dev);
+
+	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
+}
+
+static const struct dax_operations pmem_dax_ops = {
+	.direct_access = pmem_dax_direct_access,
+};
+
 static void pmem_release_queue(void *q)
 {
	blk_cleanup_queue(q);
 }
 
-static void pmem_release_disk(void *disk)
+static void pmem_release_disk(void *__pmem)
 {
-	del_gendisk(disk);
-	put_disk(disk);
+	struct pmem_device *pmem = __pmem;
+
+	kill_dax(pmem->dax_dev);
+	put_dax(pmem->dax_dev);
+	del_gendisk(pmem->disk);
+	put_disk(pmem->disk);
 }
 
 static int pmem_attach_disk(struct device *dev,
@@ -245,6 +271,7 @@ static int pmem_attach_disk(struct device *dev,
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
+	struct dax_device *dax_dev;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
@@ -325,6 +352,7 @@ static int pmem_attach_disk(struct device *dev,
	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
+	pmem->disk = disk;
 
	disk->fops = &pmem_fops;
	disk->queue = q;
@@ -336,9 +364,16 @@ static int pmem_attach_disk(struct device *dev,
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;
-	device_add_disk(dev, disk);
 
-	if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
+	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
+	if (!dax_dev) {
+		put_disk(disk);
+		return -ENOMEM;
+	}
+	pmem->dax_dev = dax_dev;
+
+	device_add_disk(dev, disk);
+	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;
 
	revalidate_disk(disk);