@@ -97,13 +97,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 	 * Must use NOIO because we don't want to recurse back into the
 	 * block or filesystem layers from page reclaim.
 	 *
-	 * Cannot support XIP and highmem, because our ->direct_access
-	 * routine for XIP must return memory that is always addressable.
-	 * If XIP was reworked to use pfns and kmap throughout, this
+	 * Cannot support DAX and highmem, because our ->direct_access
+	 * routine for DAX must return memory that is always addressable.
+	 * If DAX was reworked to use pfns and kmap throughout, this
 	 * restriction might be able to be lifted.
 	 */
 	gfp_flags = GFP_NOIO | __GFP_ZERO;
-#ifndef CONFIG_BLK_DEV_XIP
+#ifndef CONFIG_BLK_DEV_RAM_DAX
 	gfp_flags |= __GFP_HIGHMEM;
 #endif
 	page = alloc_page(gfp_flags);
@@ -369,7 +369,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
 	return err;
 }
 
-#ifdef CONFIG_BLK_DEV_XIP
+#ifdef CONFIG_BLK_DEV_RAM_DAX
 static long brd_direct_access(struct block_device *bdev, sector_t sector,
 			void **kaddr, unsigned long *pfn, long size)
 {
@@ -390,6 +390,8 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
 	 */
 	return PAGE_SIZE;
 }
+#else
+#define brd_direct_access NULL
 #endif
 
 static int brd_ioctl(struct block_device *bdev, fmode_t mode,
@@ -430,9 +432,7 @@ static const struct block_device_operations brd_fops = {
 	.owner =		THIS_MODULE,
 	.rw_page =		brd_rw_page,
 	.ioctl =		brd_ioctl,
-#ifdef CONFIG_BLK_DEV_XIP
 	.direct_access =	brd_direct_access,
-#endif
 };
 
 /*
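
Beyond the rename, the last two hunks rely on a small fallback trick: when the feature is compiled out, the handler name is defined to NULL ("#define brd_direct_access NULL"), so the .direct_access initializer in brd_fops no longer needs its own #ifdef/#endif pair and callers can simply test the pointer. What follows is a minimal, self-contained C sketch of that pattern, not code from brd.c; CONFIG_EXAMPLE_FEATURE, example_ops and example_direct_access are made-up names used only for illustration.

/*
 * Sketch of the NULL-fallback pattern used above: the ops initializer
 * is written once, and the config symbol only decides whether the
 * handler resolves to a real function or to NULL.
 */
#include <stddef.h>
#include <stdio.h>

struct example_ops {
	long (*direct_access)(void *ctx, size_t off);
};

#ifdef CONFIG_EXAMPLE_FEATURE
static long example_direct_access(void *ctx, size_t off)
{
	(void)ctx;
	return (long)off;	/* stand-in for returning a mapping */
}
#else
#define example_direct_access NULL	/* same trick as brd_direct_access */
#endif

static const struct example_ops ops = {
	.direct_access = example_direct_access,	/* no #ifdef needed here */
};

int main(void)
{
	/* Callers check the pointer instead of the config symbol. */
	if (ops.direct_access)
		printf("direct_access -> %ld\n", ops.direct_access(NULL, 42));
	else
		printf("direct_access not supported\n");
	return 0;
}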