@@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
 static struct fasync_struct *fasync;
 
+static DEFINE_SPINLOCK(random_ready_list_lock);
+static LIST_HEAD(random_ready_list);
+
 /**********************************************************************
  *
  * OS independent entropy store.  Here are the functions which handle
@@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f)
 	f->count++;
 }
 
+static void process_random_ready_list(void)
+{
+	unsigned long flags;
+	struct random_ready_callback *rdy, *tmp;
+
+	spin_lock_irqsave(&random_ready_list_lock, flags);
+	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
+		struct module *owner = rdy->owner;
+
+		list_del_init(&rdy->list);
+		rdy->func(rdy);
+		module_put(owner);
+	}
+	spin_unlock_irqrestore(&random_ready_list_lock, flags);
+}
+
 /*
  * Credit (or debit) the entropy store with n bits of entropy.
  * Use credit_entropy_bits_safe() if the value comes from userspace
@@ -660,6 +679,7 @@ retry:
 		r->entropy_total = 0;
 		if (r == &nonblocking_pool) {
 			prandom_reseed_late();
+			process_random_ready_list();
 			wake_up_all(&urandom_init_wait);
 			pr_notice("random: %s pool is initialized\n", r->name);
 		}
@@ -1256,6 +1276,64 @@ void get_blocking_random_bytes(void *buf, int nbytes)
 }
 EXPORT_SYMBOL(get_blocking_random_bytes);
 
+/*
+ * Add a callback function that will be invoked when the nonblocking
+ * pool is initialised.
+ *
+ * returns: 0 if callback is successfully added
+ *	    -EALREADY if pool is already initialised (callback not called)
+ *	    -ENOENT if module for callback is not alive
+ */
+int add_random_ready_callback(struct random_ready_callback *rdy)
+{
+	struct module *owner;
+	unsigned long flags;
+	int err = -EALREADY;
+
+	if (likely(nonblocking_pool.initialized))
+		return err;
+
+	owner = rdy->owner;
+	if (!try_module_get(owner))
+		return -ENOENT;
+
+	spin_lock_irqsave(&random_ready_list_lock, flags);
+	if (nonblocking_pool.initialized)
+		goto out;
+
+	owner = NULL;
+
+	list_add(&rdy->list, &random_ready_list);
+	err = 0;
+
+out:
+	spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+	module_put(owner);
+
+	return err;
+}
+EXPORT_SYMBOL(add_random_ready_callback);
+
+/*
+ * Delete a previously registered readiness callback function.
+ */
+void del_random_ready_callback(struct random_ready_callback *rdy)
+{
+	unsigned long flags;
+	struct module *owner = NULL;
+
+	spin_lock_irqsave(&random_ready_list_lock, flags);
+	if (!list_empty(&rdy->list)) {
+		list_del_init(&rdy->list);
+		owner = rdy->owner;
+	}
+	spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+	module_put(owner);
+}
+EXPORT_SYMBOL(del_random_ready_callback);
+
 /*
  * This function will use the architecture-specific hardware random
  * number generator if it is available.  The arch-specific hw RNG will
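
For reference, a minimal usage sketch of the API added above. It assumes the
struct random_ready_callback layout (list/func/owner members) declared by the
companion include/linux/random.h change, which is not part of this excerpt;
the my_seed_* names are hypothetical and not part of the patch:

#include <linux/list.h>
#include <linux/module.h>
#include <linux/random.h>

/* Runs once, from credit_entropy_bits(), when the nonblocking pool
 * becomes initialised. It is called under random_ready_list_lock with
 * interrupts disabled, so it must not sleep. */
static void my_seed_ready(struct random_ready_callback *rdy)
{
	pr_info("my_seed: nonblocking pool ready\n");
}

static struct random_ready_callback my_seed_rdy = {
	.list  = LIST_HEAD_INIT(my_seed_rdy.list),
	.func  = my_seed_ready,
	.owner = THIS_MODULE,
};

static int __init my_seed_init(void)
{
	int err = add_random_ready_callback(&my_seed_rdy);

	if (err == -EALREADY) {
		/* Pool was initialised before we registered; the callback
		 * will never fire, so do the work inline instead. */
		my_seed_ready(&my_seed_rdy);
		err = 0;
	}
	return err;
}

static void __exit my_seed_exit(void)
{
	/* A no-op if the callback already ran: process_random_ready_list()
	 * uses list_del_init(), so the list_empty() check in
	 * del_random_ready_callback() sees an empty node. */
	del_random_ready_callback(&my_seed_rdy);
}

module_init(my_seed_init);
module_exit(my_seed_exit);
MODULE_LICENSE("GPL");

Note the ownership dance in the patch itself: try_module_get() pins the
callback's module for as long as it sits on the list, and the reference is
dropped either when the callback fires or when it is deleted, so the module
cannot be unloaded while a callback into it is still pending. module_put(NULL)
is a no-op, which both add_random_ready_callback() and
del_random_ready_callback() rely on in their unlocked put on the way out.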