core.c

/*
 * Added support for the AMD Geode LX RNG
 * (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
 *
 * derived from
 *
 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
 * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
 *
 * derived from
 *
 * Hardware driver for the AMD 768 Random Number Generator (RNG)
 * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
 *
 * derived from
 *
 * Hardware driver for Intel i810 Random Number Generator (RNG)
 * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
 *
 * Added generic RNG API
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * ----------------------------------------------------------
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */
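
/*
 * Illustrative sketch (not part of the original file): a minimal backend
 * driver built on this framework might look roughly like the following.
 * Everything named "foo" is hypothetical; only the struct hwrng fields
 * (from <linux/hw_random.h>) and hwrng_register()/hwrng_unregister() are
 * part of this API. The read callback copies up to 'max' bytes into 'buf'
 * and returns the number of bytes produced, or a negative errno.
 *
 *	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max,
 *				bool wait)
 *	{
 *		u32 sample = readl(foo_base + FOO_RNG_DATA);
 *		size_t len = min_t(size_t, max, sizeof(sample));
 *
 *		memcpy(buf, &sample, len);
 *		return len;
 *	}
 *
 *	static struct hwrng foo_rng = {
 *		.name = "foo",
 *		.read = foo_rng_read,
 *	};
 *
 * The driver would call hwrng_register(&foo_rng) from its probe path and
 * hwrng_unregister(&foo_rng) on removal.
 */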

#include <linux/device.h>
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"
#define PFX			RNG_MODULE_NAME ": "
#define RNG_MISCDEV_MINOR	183 /* official */

static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
	memset(rng_buffer, 0, size);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
					 len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	memset(rng_buffer, 0, rng_buffer_size());
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
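
/*
 * The misc device above is what shows up as the character device node
 * (named "hwrng", minor 183, typically /dev/hwrng): reads hand back raw
 * bytes from the currently selected hardware RNG via rng_dev_read().
 */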

static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			err = 0;
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
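
/*
 * These attributes are attached to the misc device via rng_dev_groups, so
 * they appear as "rng_available" and "rng_current" in the hw_random
 * device's sysfs directory. Writing the name of a registered RNG to
 * rng_current switches the active source through set_current_rng().
 */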

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
		memset(rng_fillbuf, 0, rng_buffer_size());
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed");
		hwrng_fill = NULL;
	}
}

int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
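
/*
 * Illustrative sketch (not part of the original file): with the devm_
 * variant a backend needs no explicit unregister call, since the devres
 * entry added above tears the RNG down when the device goes away. The
 * probe function and device below are hypothetical, reusing the foo_rng
 * example from the header comment:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		return devm_hwrng_register(&pdev->dev, &foo_rng);
 *	}
 */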

static int __init hwrng_modinit(void)
{
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");