@@ -238,7 +238,7 @@ struct cache {
 	 */
 	dm_dblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
-	uint32_t discard_block_size;
+	uint32_t discard_block_size; /* a power of 2 times sectors per block */
 
 	/*
 	 * Rather than reconstructing the table line for the status we just
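For orientation before the hunks below: discard_block_size is measured in 512-byte sectors, and discard_bitset holds one bit per discard block across the origin device, with discard_nr_blocks giving its length.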
@@ -2197,6 +2197,35 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
 	return 0;
 }
 
+/*
+ * We want the discard block size to be a power of two, at least the size
+ * of the cache block size, and have no more than 2^14 discard blocks
+ * across the origin.
+ */
+#define MAX_DISCARD_BLOCKS (1 << 14)
+
+static bool too_many_discard_blocks(sector_t discard_block_size,
+				    sector_t origin_size)
+{
+	(void) sector_div(origin_size, discard_block_size);
+
+	return origin_size > MAX_DISCARD_BLOCKS;
+}
+
+static sector_t calculate_discard_block_size(sector_t cache_block_size,
+					     sector_t origin_size)
+{
+	sector_t discard_block_size;
+
+	discard_block_size = roundup_pow_of_two(cache_block_size);
+
+	if (origin_size)
+		while (too_many_discard_blocks(discard_block_size, origin_size))
+			discard_block_size *= 2;
+
+	return discard_block_size;
+}
+
 #define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
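To make the sizing concrete, here is a quick userspace sketch of the same arithmetic (illustrative only, not part of the patch; plain 64-bit division stands in for the kernel's sector_div(), and roundup_pow_of_two() is open-coded since both are kernel-only helpers):

#include <inttypes.h>
#include <stdio.h>

#define MAX_DISCARD_BLOCKS (1 << 14)

/* Stand-in for the kernel's roundup_pow_of_two(). */
static uint64_t roundup_pow2(uint64_t n)
{
	uint64_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	uint64_t cache_block_size = 96;    /* sectors; deliberately not a power of 2 */
	uint64_t origin_size = 1ULL << 31; /* a 1 TiB origin in 512-byte sectors */
	uint64_t dbs = roundup_pow2(cache_block_size);

	/* Same doubling loop as calculate_discard_block_size() above. */
	while (origin_size / dbs > MAX_DISCARD_BLOCKS)
		dbs *= 2;

	/* 96 rounds up to 128 (2^7); doubling until 2^31 / dbs <= 2^14
	 * leaves dbs = 2^17 sectors, i.e. 64 MiB discard blocks. */
	printf("discard block size = %" PRIu64 " sectors\n", dbs);
	return 0;
}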
@@ -2320,7 +2349,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	}
 	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
 
-	cache->discard_block_size = cache->sectors_per_block;
+	cache->discard_block_size =
+		calculate_discard_block_size(cache->sectors_per_block,
+					     cache->origin_sectors);
 	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
 	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
 	if (!cache->discard_bitset) {
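A side effect worth calling out: since discard_nr_blocks (and therefore the discard bitset allocated here) is now derived from the larger discard block size, the too_many_discard_blocks() cap keeps the bitset at roughly 2^14 bits (about 2 KiB) however large the origin is, where the old one-cache-block granularity made it grow linearly with origin size.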
@@ -3099,7 +3130,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 	/*
 	 * FIXME: these limits may be incompatible with the cache device
 	 */
-	limits->max_discard_sectors = cache->discard_block_size;
+	limits->max_discard_sectors = cache->discard_block_size * 1024;
 	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
 }
 
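Note the mixed units here, which are easy to misread: max_discard_sectors is expressed in 512-byte sectors, while discard_granularity is in bytes, hence the << SECTOR_SHIFT. Continuing the example above (a 2^17-sector discard block), a single discard bio may now span up to 1024 discard blocks, i.e. 2^27 sectors or 64 GiB, where the old limit allowed exactly one discard block per bio.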