/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

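/*
 * Size of the buffers shared with drivers: at least 32 bytes, and never
 * smaller than one cache line (SMP_CACHE_BYTES).
 */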
static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

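/*
 * Mix a small sample from a freshly initialised device into the kernel's
 * entropy input pool. add_device_randomness() credits no entropy, so this
 * is safe even before the device's quality is known.
 */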
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;
	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

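/*
 * Bring a device up under rng_mutex: if it is already live, just take
 * another reference; otherwise run its ->init() callback (if any) and
 * initialise the refcount. In either case, start or stop the khwrngd
 * fill thread according to the resulting entropy quality estimate.
 */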
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

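/*
 * read() handler for /dev/hwrng: loop refilling rng_buffer from the
 * current device and copying out to userspace until the request is
 * satisfied, the device runs dry (-EAGAIN for O_NONBLOCK readers), or a
 * signal arrives.
 */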
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;
out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};

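/*
 * sysfs interface: writing the name of a registered device to rng_current
 * switches the active source; rng_current and rng_available report the
 * active and the registered devices respectively.
 */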
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			err = 0;
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

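/*
 * Body of the khwrngd kernel thread: keep pulling data from the current
 * device and feeding it to the input pool, backing off for 10s whenever
 * the device yields nothing. The entropy credited for rc bytes is rc * 8
 * bits scaled by current_quality/1024, i.e. rc * current_quality * 8 >> 10.
 */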
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}

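/**
 * hwrng_register - register a hardware random number generator
 * @rng: the device to register; must have a name and either a ->read or
 *	 a ->data_read callback
 *
 * The first device registered becomes the current source. Returns 0 on
 * success or a negative errno (-EINVAL for a malformed @rng, -EEXIST for
 * a duplicate name).
 *
 * Typical driver-side usage, as a minimal sketch (the "foo" driver, its
 * callback and foo_hw_read_bytes() are hypothetical, not part of this
 * file):
 *
 *	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max,
 *				bool wait)
 *	{
 *		// fill buf with up to max bytes of hardware randomness;
 *		// return the number of bytes produced or a -errno value
 *		return foo_hw_read_bytes(buf, max, wait);
 *	}
 *
 *	static struct hwrng foo_rng = {
 *		.name    = "foo",
 *		.read    = foo_rng_read,
 *		.quality = 700,	// estimated entropy per mill, 0..1024
 *	};
 *
 *	err = hwrng_register(&foo_rng);
 */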
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

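/**
 * hwrng_unregister - unregister a hardware random number generator
 * @rng: the device to remove
 *
 * If @rng is the current source, the most recently registered remaining
 * device takes over; with no devices left, the khwrngd thread is stopped.
 * Blocks until any outstanding users have dropped their references.
 */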
void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

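/**
 * devm_hwrng_register - resource-managed hwrng_register()
 * @dev: device that owns the registration
 * @rng: the device to register
 *
 * Like hwrng_register(), but the registration is dropped automatically
 * when @dev is unbound, so most drivers need no explicit unregister call.
 */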
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

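/**
 * devm_hwrng_unregister - undo a devm_hwrng_register() before unbind
 * @dev: device that owns the registration
 * @rng: the device to unregister
 */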
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

static int __init hwrng_modinit(void)
{
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");