@@ -80,22 +80,40 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
 static void write_pmem(void *pmem_addr, struct page *page,
 		unsigned int off, unsigned int len)
 {
-	void *mem = kmap_atomic(page);
-
-	memcpy_flushcache(pmem_addr, mem + off, len);
-	kunmap_atomic(mem);
+	unsigned int chunk;
+	void *mem;
+
+	while (len) {
+		mem = kmap_atomic(page);
+		chunk = min_t(unsigned int, len, PAGE_SIZE);
+		memcpy_flushcache(pmem_addr, mem + off, chunk);
+		kunmap_atomic(mem);
+		len -= chunk;
+		off = 0;
+		page++;
+		pmem_addr += PAGE_SIZE;
+	}
 }
 
 static blk_status_t read_pmem(struct page *page, unsigned int off,
 		void *pmem_addr, unsigned int len)
 {
+	unsigned int chunk;
 	int rc;
-	void *mem = kmap_atomic(page);
-
-	rc = memcpy_mcsafe(mem + off, pmem_addr, len);
-	kunmap_atomic(mem);
-	if (rc)
-		return BLK_STS_IOERR;
+	void *mem;
+
+	while (len) {
+		mem = kmap_atomic(page);
+		chunk = min_t(unsigned int, len, PAGE_SIZE);
+		rc = memcpy_mcsafe(mem + off, pmem_addr, chunk);
+		kunmap_atomic(mem);
+		if (rc)
+			return BLK_STS_IOERR;
+		len -= chunk;
+		off = 0;
+		page++;
+		pmem_addr += PAGE_SIZE;
+	}
 	return BLK_STS_OK;
 }
 
@@ -188,7 +206,8 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	blk_status_t rc;
 
-	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
+	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
+			0, is_write, sector);
 
 	/*
 	 * The ->rw_page interface is subtle and tricky. The core
|