@@ -53,9 +53,9 @@ static ssize_t name##_show(struct device *d,		\
 }									\
 static DEVICE_ATTR_RO(name);
 
-static inline int init_done(struct zram *zram)
+static inline bool init_done(struct zram *zram)
 {
-	return zram->meta != NULL;
+	return zram->disksize;
 }
 
 static inline struct zram *dev_to_zram(struct device *dev)
@@ -356,6 +356,18 @@ out_error:
 	return NULL;
 }
 
+static inline bool zram_meta_get(struct zram *zram)
+{
+	if (atomic_inc_not_zero(&zram->refcount))
+		return true;
+	return false;
+}
+
+static inline void zram_meta_put(struct zram *zram)
+{
+	atomic_dec(&zram->refcount);
+}
+
 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
 {
 	if (*offset + bvec->bv_len >= PAGE_SIZE)
@@ -717,6 +729,10 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 
 static void zram_reset_device(struct zram *zram)
 {
+	struct zram_meta *meta;
+	struct zcomp *comp;
+	u64 disksize;
+
 	down_write(&zram->init_lock);
 
 	zram->limit_pages = 0;
@@ -726,16 +742,31 @@ static void zram_reset_device(struct zram *zram)
 		return;
 	}
 
-	zcomp_destroy(zram->comp);
-	zram->max_comp_streams = 1;
-	zram_meta_free(zram->meta, zram->disksize);
-	zram->meta = NULL;
+	meta = zram->meta;
+	comp = zram->comp;
+	disksize = zram->disksize;
+	/*
+	 * The refcount will go down to 0 eventually, and the r/w handlers
+	 * cannot handle further I/O, so they will bail out when
+	 * zram_meta_get() fails.
+	 */
+	zram_meta_put(zram);
+	/*
+	 * We want to free zram_meta in process context to avoid
+	 * deadlock between reclaim path and any other locks.
+	 */
+	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
+
 	/* Reset stats */
 	memset(&zram->stats, 0, sizeof(zram->stats));
 	zram->disksize = 0;
+	zram->max_comp_streams = 1;
 	set_capacity(zram->disk, 0);
 
 	up_write(&zram->init_lock);
+	/* I/O operations on all CPUs are done, so it is safe to free now */
+	zram_meta_free(meta, disksize);
+	zcomp_destroy(comp);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -771,6 +802,8 @@ static ssize_t disksize_store(struct device *dev,
 		goto out_destroy_comp;
 	}
 
+	init_waitqueue_head(&zram->io_done);
+	atomic_set(&zram->refcount, 1);
 	zram->meta = meta;
 	zram->comp = comp;
 	zram->disksize = disksize;
@@ -901,23 +934,21 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct zram *zram = queue->queuedata;
 
-	down_read(&zram->init_lock);
-	if (unlikely(!init_done(zram)))
+	if (unlikely(!zram_meta_get(zram)))
 		goto error;
 
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 					bio->bi_iter.bi_size)) {
 		atomic64_inc(&zram->stats.invalid_io);
-		goto error;
+		goto put_zram;
 	}
 
 	__zram_make_request(zram, bio);
-	up_read(&zram->init_lock);
-
+	zram_meta_put(zram);
 	return;
-
+put_zram:
+	zram_meta_put(zram);
 error:
-	up_read(&zram->init_lock);
 	bio_io_error(bio);
 }
 
@@ -939,21 +970,19 @@ static void zram_slot_free_notify(struct block_device *bdev,
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
 		       struct page *page, int rw)
 {
-	int offset, err;
+	int offset, err = -EIO;
 	u32 index;
 	struct zram *zram;
 	struct bio_vec bv;
 
 	zram = bdev->bd_disk->private_data;
+	if (unlikely(!zram_meta_get(zram)))
+		goto out;
+
 	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
 		atomic64_inc(&zram->stats.invalid_io);
-		return -EINVAL;
-	}
-
-	down_read(&zram->init_lock);
-	if (unlikely(!init_done(zram))) {
-		err = -EIO;
-		goto out_unlock;
+		err = -EINVAL;
+		goto put_zram;
 	}
 
 	index = sector >> SECTORS_PER_PAGE_SHIFT;
@@ -964,8 +993,9 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
 	bv.bv_offset = 0;
 
 	err = zram_bvec_rw(zram, &bv, index, offset, rw);
-out_unlock:
-	up_read(&zram->init_lock);
+put_zram:
+	zram_meta_put(zram);
+out:
 	/*
 	 * If I/O fails, just return error(ie, non-zero) without
 	 * calling page_endio.