@@ -2016,63 +2016,65 @@ struct ctl_table random_table[] = {
 };
 #endif /* CONFIG_SYSCTL */
 
-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-
-int random_int_secret_init(void)
-{
-	get_random_bytes(random_int_secret, sizeof(random_int_secret));
-	return 0;
-}
-
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
-		__aligned(sizeof(unsigned long));
+struct batched_entropy {
+	union {
+		unsigned long entropy_long[CHACHA20_BLOCK_SIZE / sizeof(unsigned long)];
+		unsigned int entropy_int[CHACHA20_BLOCK_SIZE / sizeof(unsigned int)];
+	};
+	unsigned int position;
+};
 
 /*
- * Get a random word for internal kernel use only. Similar to urandom but
- * with the goal of minimal entropy pool depletion. As a result, the random
- * value is not cryptographically secure but for several uses the cost of
- * depleting entropy is too high
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is either as good as RDRAND or as good as /dev/urandom, with the
+ * goal of being quite fast and not depleting entropy.
  */
-unsigned int get_random_int(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
+unsigned long get_random_long(void)
 {
-	__u32 *hash;
-	unsigned int ret;
+	unsigned long ret;
+	struct batched_entropy *batch;
 
-	if (arch_get_random_int(&ret))
+	if (arch_get_random_long(&ret))
 		return ret;
 
-	hash = get_cpu_var(get_random_int_hash);
-
-	hash[0] += current->pid + jiffies + random_get_entropy();
-	md5_transform(hash, random_int_secret);
-	ret = hash[0];
-	put_cpu_var(get_random_int_hash);
-
+	batch = &get_cpu_var(batched_entropy_long);
+	if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
+		extract_crng((u8 *)batch->entropy_long);
+		batch->position = 0;
+	}
+	ret = batch->entropy_long[batch->position++];
+	put_cpu_var(batched_entropy_long);
 	return ret;
 }
-EXPORT_SYMBOL(get_random_int);
+EXPORT_SYMBOL(get_random_long);
 
-/*
- * Same as get_random_int(), but returns unsigned long.
- */
-unsigned long get_random_long(void)
+#if BITS_PER_LONG == 32
+unsigned int get_random_int(void)
 {
-	__u32 *hash;
-	unsigned long ret;
+	return get_random_long();
+}
+#else
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
+unsigned int get_random_int(void)
+{
+	unsigned int ret;
+	struct batched_entropy *batch;
 
-	if (arch_get_random_long(&ret))
+	if (arch_get_random_int(&ret))
 		return ret;
 
-	hash = get_cpu_var(get_random_int_hash);
-
-	hash[0] += current->pid + jiffies + random_get_entropy();
-	md5_transform(hash, random_int_secret);
-	ret = *(unsigned long *)hash;
-	put_cpu_var(get_random_int_hash);
-
+	batch = &get_cpu_var(batched_entropy_int);
+	if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
+		extract_crng((u8 *)batch->entropy_int);
+		batch->position = 0;
+	}
+	ret = batch->entropy_int[batch->position++];
+	put_cpu_var(batched_entropy_int);
 	return ret;
 }
-EXPORT_SYMBOL(get_random_long);
+#endif
+EXPORT_SYMBOL(get_random_int);
 
 /**
  * randomize_page - Generate a random, page aligned address
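
A note on the batching pattern above: each per-CPU batch holds one full ChaCha20 block (CHACHA20_BLOCK_SIZE bytes), and extract_crng() refills it only when position wraps, so the common case is a single array read and an increment under get_cpu_var()'s preemption protection. Keeping separate entropy_long and entropy_int batches means a 32-bit request consumes only 4 bytes of the block instead of a full unsigned long slot. For illustration only, here is a minimal user-space sketch of the same refill-on-wrap idea; it is not kernel code: getrandom(2) stands in for extract_crng(), the per-CPU buffers and arch_get_random_* fast path are omitted, and the names get_random_word() and BATCH_BYTES are invented for the sketch.

#include <stdlib.h>
#include <sys/random.h>

#define BATCH_BYTES 64	/* one ChaCha20 block, matching CHACHA20_BLOCK_SIZE */

static unsigned long entropy[BATCH_BYTES / sizeof(unsigned long)];
static unsigned int position;

/* Hand out one word per call; refill the whole batch when it runs dry. */
unsigned long get_random_word(void)
{
	if (position % (sizeof(entropy) / sizeof(entropy[0])) == 0) {
		/* Refill from the system CSPRNG (the kernel uses extract_crng()). */
		if (getrandom(entropy, sizeof(entropy), 0) != sizeof(entropy))
			abort();	/* refill failed; real code would retry */
		position = 0;
	}
	return entropy[position++];
}

The sketch is single-threaded by construction; the kernel version gets the equivalent safety from one batch per CPU, which is also what lets it skip any locking on the hot path.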