@@ -113,6 +113,11 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	return rc;
 }
 
+/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
+#ifndef REQ_FLUSH
+#define REQ_FLUSH REQ_PREFLUSH
+#endif
+
 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
 	int rc = 0;
@@ -121,6 +126,10 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	struct pmem_device *pmem = q->queuedata;
+	struct nd_region *nd_region = to_region(pmem);
+
+	if (bio->bi_rw & REQ_FLUSH)
+		nvdimm_flush(nd_region);
 
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
@@ -135,8 +144,8 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	if (do_acct)
 		nd_iostat_end(bio, start);
 
-	if (bio_data_dir(bio))
-		nvdimm_flush(to_region(pmem));
+	if (bio->bi_rw & REQ_FUA)
+		nvdimm_flush(nd_region);
 
 	bio_endio(bio);
 	return BLK_QC_T_NONE;
@@ -149,8 +158,6 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	int rc;
 
 	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
-	if (rw & WRITE)
-		nvdimm_flush(to_region(pmem));
 
 	/*
 	 * The ->rw_page interface is subtle and tricky. The core
@@ -279,6 +286,7 @@ static int pmem_attach_disk(struct device *dev,
 		return PTR_ERR(addr);
 	pmem->virt_addr = (void __pmem *) addr;
 
+	blk_queue_write_cache(q, true, true);
 	blk_queue_make_request(q, pmem_make_request);
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
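
For context, a minimal userspace sketch of how the reworked flush path gets exercised: once blk_queue_write_cache(q, true, true) advertises a volatile write cache with FUA support, an fsync() against the pmem block device makes the block layer issue a flush bio (REQ_PREFLUSH, formerly REQ_FLUSH), which the patched pmem_make_request() maps to nvdimm_flush(). The /dev/pmem0 path below is an assumption for illustration only; this is an untested sketch, not part of the patch.

/*
 * Hypothetical sketch: write a block to a pmem namespace and fsync() it.
 * fsync() on the block device writes back dirty pages and asks the block
 * layer for a cache flush, which reaches nvdimm_flush() via the patched
 * driver. /dev/pmem0 is assumed for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/dev/pmem0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/pmem0");
		return 1;
	}

	memset(buf, 0xa5, sizeof(buf));
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("write");
		close(fd);
		return 1;
	}

	/* flush the write cache the driver now advertises */
	if (fsync(fd) < 0) {
		perror("fsync");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}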