@@ -148,6 +148,8 @@ struct crypt_config {
 	mempool_t *tag_pool;
 	unsigned tag_pool_max_sectors;
 
+	struct percpu_counter n_allocated_pages;
+
 	struct bio_set *bs;
 	struct mutex bio_alloc_lock;
 
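The new field is a struct percpu_counter rather than a plain atomic_t, so page allocations running on different CPUs do not contend on a single cache line, at the cost of reads being approximate. A minimal sketch of the counter lifecycle the patch depends on (the demo_* names are invented for illustration, not part of dm-crypt):

	#include <linux/percpu_counter.h>
	#include <linux/gfp.h>

	static struct percpu_counter demo_counter;

	static int demo_use_counter(void)
	{
		/* Allocates the per-CPU slots; can fail under memory pressure. */
		int ret = percpu_counter_init(&demo_counter, 0, GFP_KERNEL);
		if (ret)
			return ret;

		percpu_counter_add(&demo_counter, 1);	/* cheap: batched into a per-CPU delta */
		if (percpu_counter_compare(&demo_counter, 10) >= 0)
			;	/* over the threshold; this is the check crypt_page_alloc() uses */
		percpu_counter_sub(&demo_counter, 1);

		percpu_counter_destroy(&demo_counter);	/* must pair with percpu_counter_init() */
		return 0;
	}
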
@@ -219,6 +221,12 @@ struct crypt_config {
 #define MAX_TAG_SIZE	480
 #define POOL_ENTRY_SIZE	512
 
+static DEFINE_SPINLOCK(dm_crypt_clients_lock);
+static unsigned dm_crypt_clients_n = 0;
+static volatile unsigned long dm_crypt_pages_per_client;
+#define DM_CRYPT_MEMORY_PERCENT			2
+#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_PAGES * 16)
+
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
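To make the limit concrete: assuming 4 KiB pages and 4 GiB of non-highmem RAM (about 1,048,576 pages), the DM_CRYPT_MEMORY_PERCENT budget is roughly 20,970 pages (~82 MiB) divided evenly among active dm-crypt targets. With BIO_MAX_PAGES = 256, the DM_CRYPT_MIN_PAGES_PER_CLIENT floor works out to 256 * 16 = 4096 pages (16 MiB), so on such a machine the floor takes over once six or more targets are active; the aggregate limit can then exceed the 2% budget, trading strictness for guaranteed per-client headroom.
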
@@ -2155,6 +2163,43 @@ static int crypt_wipe_key(struct crypt_config *cc)
 	return r;
 }
 
+static void crypt_calculate_pages_per_client(void)
+{
+	unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
+
+	if (!dm_crypt_clients_n)
+		return;
+
+	pages /= dm_crypt_clients_n;
+	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
+		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
+	dm_crypt_pages_per_client = pages;
+}
+
+static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
+{
+	struct crypt_config *cc = pool_data;
+	struct page *page;
+
+	if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
+	    likely(gfp_mask & __GFP_NORETRY))
+		return NULL;
+
+	page = alloc_page(gfp_mask);
+	if (likely(page != NULL))
+		percpu_counter_add(&cc->n_allocated_pages, 1);
+
+	return page;
+}
+
+static void crypt_page_free(void *page, void *pool_data)
+{
+	struct crypt_config *cc = pool_data;
+
+	__free_page(page);
+	percpu_counter_sub(&cc->n_allocated_pages, 1);
+}
+
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
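crypt_page_alloc() only refuses an allocation when the client is over its limit and the caller passed __GFP_NORETRY. This split matters because mempool_alloc() ORs __GFP_NORETRY into every mask it is given, while the pool's initial reserve is populated at creation time with plain GFP_KERNEL; the reserve can therefore always be filled, and only opportunistic per-I/O allocations are throttled. A sketch of the resulting caller-side pattern (demo_get_page() is invented for illustration; dm-crypt's actual caller is crypt_alloc_buffer()):

	#include <linux/mempool.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static struct page *demo_get_page(mempool_t *pool)
	{
		struct page *page;

		/* Opportunistic attempt: crypt_page_alloc() may return NULL
		 * when over the per-client limit, and GFP_NOWAIT never
		 * sleeps, so this fails fast under pressure. */
		page = mempool_alloc(pool, GFP_NOWAIT);
		if (!page)
			/* Blocking fallback: waits for an element to be
			 * returned to the pool's reserve if necessary. */
			page = mempool_alloc(pool, GFP_NOIO);
		return page;
	}
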
@@ -2181,6 +2226,10 @@ static void crypt_dtr(struct dm_target *ti)
 	mempool_destroy(cc->req_pool);
 	mempool_destroy(cc->tag_pool);
 
+	if (cc->page_pool)
+		WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
+	percpu_counter_destroy(&cc->n_allocated_pages);
+
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
 
@@ -2197,6 +2246,12 @@ static void crypt_dtr(struct dm_target *ti)
 
 	/* Must zero key material before freeing */
 	kzfree(cc);
+
+	spin_lock(&dm_crypt_clients_lock);
+	WARN_ON(!dm_crypt_clients_n);
+	dm_crypt_clients_n--;
+	crypt_calculate_pages_per_client();
+	spin_unlock(&dm_crypt_clients_lock);
 }
 
 static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
@@ -2644,6 +2699,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	ti->private = cc;
 
+	spin_lock(&dm_crypt_clients_lock);
+	dm_crypt_clients_n++;
+	crypt_calculate_pages_per_client();
+	spin_unlock(&dm_crypt_clients_lock);
+
+	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto bad;
+
 	/* Optional parameters need to be read before cipher constructor */
 	if (argc > 5) {
 		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
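One ordering subtlety in the constructor: the counter is initialized right after the client count is bumped, and crypt_dtr() destroys it unconditionally. This is safe because cc is allocated with kzalloc(), so if percpu_counter_init() fails and the code jumps to bad, percpu_counter_destroy() sees a NULL per-CPU pointer and returns early.
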
@@ -2698,7 +2762,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
 		      ARCH_KMALLOC_MINALIGN);
 
-	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
+	cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;
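
For context, the replaced mempool_create_page_pool(BIO_MAX_PAGES, 0) was shorthand for the stock page helpers; the new call keeps the same reserve size (BIO_MAX_PAGES elements) but routes every page through the counting hooks above. Roughly (demo_old_page_pool() is an invented name for illustration):

	#include <linux/mempool.h>
	#include <linux/bio.h>

	/* Equivalent expansion of the old call: the stock helpers allocate
	 * and free order-0 pages with no per-client accounting. */
	static mempool_t *demo_old_page_pool(void)
	{
		return mempool_create(BIO_MAX_PAGES,
				      mempool_alloc_pages,	/* alloc_pages(gfp_mask, order) */
				      mempool_free_pages,	/* __free_pages(page, order) */
				      (void *)0 /* order 0 */);
	}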