@@ -30,6 +30,11 @@
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
+#define NUM_PREALLOC_POST_READ_CTXS	128
+
+static struct kmem_cache *bio_post_read_ctx_cache;
+static mempool_t *bio_post_read_ctx_pool;
+
 static bool __is_cp_guaranteed(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
@@ -50,11 +55,77 @@ static bool __is_cp_guaranteed(struct page *page)
 	return false;
 }
 
-static void f2fs_read_end_io(struct bio *bio)
+/* postprocessing steps for read bios */
+enum bio_post_read_step {
+	STEP_INITIAL = 0,
+	STEP_DECRYPT,
+};
+
+struct bio_post_read_ctx {
+	struct bio *bio;
+	struct work_struct work;
+	unsigned int cur_step;
+	unsigned int enabled_steps;
+};
+
+static void __read_end_io(struct bio *bio)
 {
-	struct bio_vec *bvec;
+	struct page *page;
+	struct bio_vec *bv;
 	int i;
 
+	bio_for_each_segment_all(bv, bio, i) {
+		page = bv->bv_page;
+
+		/* PG_error was set if any post_read step failed */
+		if (bio->bi_status || PageError(page)) {
+			ClearPageUptodate(page);
+			SetPageError(page);
+		} else {
+			SetPageUptodate(page);
+		}
+		unlock_page(page);
+	}
+	if (bio->bi_private)
+		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+	bio_put(bio);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
+
+static void decrypt_work(struct work_struct *work)
+{
+	struct bio_post_read_ctx *ctx =
+		container_of(work, struct bio_post_read_ctx, work);
+
+	fscrypt_decrypt_bio(ctx->bio);
+
+	bio_post_read_processing(ctx);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
+{
+	switch (++ctx->cur_step) {
+	case STEP_DECRYPT:
+		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
+			INIT_WORK(&ctx->work, decrypt_work);
+			fscrypt_enqueue_decrypt_work(&ctx->work);
+			return;
+		}
+		ctx->cur_step++;
+		/* fall-through */
+	default:
+		__read_end_io(ctx->bio);
+	}
+}
+
+static bool f2fs_bio_post_read_required(struct bio *bio)
+{
+	return bio->bi_private && !bio->bi_status;
+}
+
+static void f2fs_read_end_io(struct bio *bio)
+{
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
 		f2fs_show_injection_info(FAULT_IO);
@@ -62,28 +133,15 @@ static void f2fs_read_end_io(struct bio *bio)
 	}
 #endif
 
-	if (f2fs_bio_encrypted(bio)) {
-		if (bio->bi_status) {
-			fscrypt_release_ctx(bio->bi_private);
-		} else {
-			fscrypt_enqueue_decrypt_bio(bio->bi_private, bio);
-			return;
-		}
-	}
-
-	bio_for_each_segment_all(bvec, bio, i) {
-		struct page *page = bvec->bv_page;
+	if (f2fs_bio_post_read_required(bio)) {
+		struct bio_post_read_ctx *ctx = bio->bi_private;
 
-		if (!bio->bi_status) {
-			if (!PageUptodate(page))
-				SetPageUptodate(page);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
-		}
-		unlock_page(page);
+		ctx->cur_step = STEP_INITIAL;
+		bio_post_read_processing(ctx);
+		return;
 	}
-	bio_put(bio);
+
+	__read_end_io(bio);
 }
 
 static void f2fs_write_end_io(struct bio *bio)
@@ -481,29 +539,33 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 					 unsigned nr_pages)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct fscrypt_ctx *ctx = NULL;
 	struct bio *bio;
-
-	if (f2fs_encrypted_file(inode)) {
-		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
-		if (IS_ERR(ctx))
-			return ERR_CAST(ctx);
-
-		/* wait the page to be moved by cleaning */
-		f2fs_wait_on_block_writeback(sbi, blkaddr);
-	}
+	struct bio_post_read_ctx *ctx;
+	unsigned int post_read_steps = 0;
 
 	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
-	if (!bio) {
-		if (ctx)
-			fscrypt_release_ctx(ctx);
+	if (!bio)
 		return ERR_PTR(-ENOMEM);
-	}
 	f2fs_target_device(sbi, blkaddr, bio);
 	bio->bi_end_io = f2fs_read_end_io;
-	bio->bi_private = ctx;
 	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
+	if (f2fs_encrypted_file(inode))
+		post_read_steps |= 1 << STEP_DECRYPT;
+	if (post_read_steps) {
+		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
+		if (!ctx) {
+			bio_put(bio);
+			return ERR_PTR(-ENOMEM);
+		}
+		ctx->bio = bio;
+		ctx->enabled_steps = post_read_steps;
+		bio->bi_private = ctx;
+
+		/* wait the page to be moved by cleaning */
+		f2fs_wait_on_block_writeback(sbi, blkaddr);
+	}
+
 	return bio;
 }
 
@@ -1525,7 +1587,7 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
 	if (!f2fs_encrypted_file(inode))
 		return 0;
 
-	/* wait for GCed encrypted page writeback */
+	/* wait for GCed page writeback via META_MAPPING */
 	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
 
 retry_encrypt:
@@ -2222,8 +2284,8 @@ repeat:
 
 	f2fs_wait_on_page_writeback(page, DATA, false);
 
-	/* wait for GCed encrypted page writeback */
-	if (f2fs_encrypted_file(inode))
+	/* wait for GCed page writeback via META_MAPPING */
+	if (f2fs_post_read_required(inode))
 		f2fs_wait_on_block_writeback(sbi, blkaddr);
 
 	if (len == PAGE_SIZE || PageUptodate(page))
@@ -2555,3 +2617,27 @@ const struct address_space_operations f2fs_dblock_aops = {
 	.migratepage	= f2fs_migrate_page,
 #endif
 };
+
+int __init f2fs_init_post_read_processing(void)
+{
+	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
+	if (!bio_post_read_ctx_cache)
+		goto fail;
+	bio_post_read_ctx_pool =
+		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
+					 bio_post_read_ctx_cache);
+	if (!bio_post_read_ctx_pool)
+		goto fail_free_cache;
+	return 0;
+
+fail_free_cache:
+	kmem_cache_destroy(bio_post_read_ctx_cache);
+fail:
+	return -ENOMEM;
+}
+
+void __exit f2fs_destroy_post_read_processing(void)
+{
+	mempool_destroy(bio_post_read_ctx_pool);
+	kmem_cache_destroy(bio_post_read_ctx_cache);
+}
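
Note: the hunks above call f2fs_post_read_required() and define f2fs_init_post_read_processing()/f2fs_destroy_post_read_processing(), but this excerpt covers only fs/f2fs/data.c. A minimal sketch of the counterpart wiring the rest of the patch presumably supplies (the placement in f2fs.h and the call from the module init path are assumptions, not shown above):

/* fs/f2fs/f2fs.h -- sketch, assumed counterpart to the data.c hunks */

/*
 * Reads of this inode's data need postprocessing before the pages can
 * be marked Uptodate.  With STEP_DECRYPT as the only step so far this
 * is equivalent to f2fs_encrypted_file(), but callers go through this
 * helper so that future steps slot in without touching every call site.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode);
}

int __init f2fs_init_post_read_processing(void);
void __exit f2fs_destroy_post_read_processing(void);

f2fs_init_post_read_processing() would then be called from the module init path in fs/f2fs/super.c before the filesystem registers, with f2fs_destroy_post_read_processing() on the corresponding error and exit paths, mirroring how the other f2fs caches are managed.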
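
The cur_step/enabled_steps pair turns read completion into a small state machine: each enabled step runs in process context and re-enters bio_post_read_processing() when done, while disabled steps are skipped by the fall-through. As a purely hypothetical illustration of why that shape was chosen (STEP_VERITY and verity_work() are invented here, not part of this patch), a second postprocessing step would only add an enum value and one case:

/* Hypothetical extension -- illustration only, not in this patch. */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,		/* e.g. verify data against a hash tree */
};

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;		/* re-entered from decrypt_work() */
		}
		ctx->cur_step++;
		/* fall-through */
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);	/* hypothetical */
			queue_work(system_unbound_wq, &ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	default:
		__read_end_io(ctx->bio);
	}
}

f2fs_grab_read_bio() would set the extra bit in post_read_steps for inodes that need it; f2fs_read_end_io() and __read_end_io() stay untouched, which is the point of the refactor.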