@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 	int ret;
 
-	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
+	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 	if (!sctx)
 		goto nomem;
 	atomic_set(&sctx->refs, 1);
@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 		struct scrub_bio *sbio;
 
-		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
+		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
 		if (!sbio)
 			goto nomem;
 		sctx->bios[i] = sbio;
@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 again:
 	if (!wr_ctx->wr_curr_bio) {
 		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
-					      GFP_NOFS);
+					      GFP_KERNEL);
 		if (!wr_ctx->wr_curr_bio) {
 			mutex_unlock(&wr_ctx->wr_lock);
 			return -ENOMEM;
@@ -1671,7 +1671,8 @@ again:
 		sbio->dev = wr_ctx->tgtdev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+			bio = btrfs_io_bio_alloc(GFP_KERNEL,
+						 wr_ctx->pages_per_wr_bio);
 			if (!bio) {
 				mutex_unlock(&wr_ctx->wr_lock);
 				return -ENOMEM;
@@ -2076,7 +2077,8 @@ again:
 		sbio->dev = spage->dev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+			bio = btrfs_io_bio_alloc(GFP_KERNEL,
+						 sctx->pages_per_rd_bio);
 			if (!bio)
 				return -ENOMEM;
 			sbio->bio = bio;
@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 	struct scrub_block *sblock;
 	int index;
 
-	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 		struct scrub_page *spage;
 		u64 l = min_t(u64, len, PAGE_SIZE);
 
-		spage = kzalloc(sizeof(*spage), GFP_NOFS);
+		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
 		if (!spage) {
 leave_nomem:
 			spin_lock(&sctx->stat_lock);
@@ -2286,7 +2288,7 @@ leave_nomem:
 			spage->have_csum = 0;
 		}
 		sblock->page_count++;
-		spage->page = alloc_page(GFP_NOFS);
+		spage->page = alloc_page(GFP_KERNEL);
 		if (!spage->page)
 			goto leave_nomem;
 		len -= l;
@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 	struct scrub_block *sblock;
 	int index;
 
-	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
 	if (!sblock) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 		struct scrub_page *spage;
 		u64 l = min_t(u64, len, PAGE_SIZE);
 
-		spage = kzalloc(sizeof(*spage), GFP_NOFS);
+		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
 		if (!spage) {
 leave_nomem:
 			spin_lock(&sctx->stat_lock);
@@ -2591,7 +2593,7 @@ leave_nomem:
 			spage->have_csum = 0;
 		}
 		sblock->page_count++;
-		spage->page = alloc_page(GFP_NOFS);
+		spage->page = alloc_page(GFP_KERNEL);
 		if (!spage->page)
 			goto leave_nomem;
 		len -= l;
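
Note on the conversion: every call site touched above is on the scrub
submission side, reached from the scrub ioctl and its worker threads
rather than from the writeback/reclaim path, so letting the allocator
recurse into filesystem reclaim cannot deadlock on locks the caller
holds. That recursion right is the only difference between the two
flags: GFP_NOFS is GFP_KERNEL with __GFP_FS cleared. For contrast, a
minimal sketch (illustration only, with a hypothetical helper name) of
the scoped-NOFS pattern that later kernels provide for code that
genuinely must not re-enter the filesystem:

	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	/* Hypothetical example, not part of this patch. */
	static void *alloc_under_fs_locks(size_t size)
	{
		unsigned int nofs_flag;
		void *p;

		/*
		 * Open a NOFS scope: GFP_KERNEL allocations inside it
		 * are implicitly treated as GFP_NOFS by the allocator.
		 */
		nofs_flag = memalloc_nofs_save();
		p = kzalloc(size, GFP_KERNEL);
		memalloc_nofs_restore(nofs_flag);
		return p;
	}

With the scoped API the NOFS constraint lives with the locking context
that imposes it instead of being hardcoded at every allocation site;
paths like scrub that never run under such a scope can simply use
GFP_KERNEL, as done here.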