fallback.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/types.h>
  3. #include <linux/kconfig.h>
  4. #include <linux/list.h>
  5. #include <linux/slab.h>
  6. #include <linux/security.h>
  7. #include <linux/highmem.h>
  8. #include <linux/umh.h>
  9. #include <linux/sysctl.h>
  10. #include "fallback.h"
  11. #include "firmware.h"
  12. /*
  13. * firmware fallback mechanism
  14. */
  15. extern struct firmware_fallback_config fw_fallback_config;
  16. /* These getters are vetted to use int properly */
  17. static inline int __firmware_loading_timeout(void)
  18. {
  19. return fw_fallback_config.loading_timeout;
  20. }
  21. /* These setters are vetted to use int properly */
  22. static void __fw_fallback_set_timeout(int timeout)
  23. {
  24. fw_fallback_config.loading_timeout = timeout;
  25. }
  26. /*
  27. * use small loading timeout for caching devices' firmware because all these
  28. * firmware images have been loaded successfully at lease once, also system is
  29. * ready for completing firmware loading now. The maximum size of firmware in
  30. * current distributions is about 2M bytes, so 10 secs should be enough.
  31. */
  32. void fw_fallback_set_cache_timeout(void)
  33. {
  34. fw_fallback_config.old_timeout = __firmware_loading_timeout();
  35. __fw_fallback_set_timeout(10);
  36. }
  37. /* Restores the timeout to the value last configured during normal operation */
  38. void fw_fallback_set_default_timeout(void)
  39. {
  40. __fw_fallback_set_timeout(fw_fallback_config.old_timeout);
  41. }
  42. static long firmware_loading_timeout(void)
  43. {
  44. return __firmware_loading_timeout() > 0 ?
  45. __firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
  46. }
  47. static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
  48. {
  49. return __fw_state_check(fw_priv, FW_STATUS_DONE);
  50. }
  51. static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
  52. {
  53. return __fw_state_check(fw_priv, FW_STATUS_LOADING);
  54. }
  55. static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
  56. {
  57. return __fw_state_wait_common(fw_priv, timeout);
  58. }
/* Per-request state for one sysfs fallback firmware load. */
struct fw_sysfs {
	bool nowait;		/* set from FW_OPT_NOWAIT in fw_create_instance() */
	struct device dev;	/* the /sys/class/firmware/<name> device */
	struct fw_priv *fw_priv;	/* buffer being filled by userspace */
	struct firmware *fw;	/* firmware struct returned to the requester */
};
  65. static struct fw_sysfs *to_fw_sysfs(struct device *dev)
  66. {
  67. return container_of(dev, struct fw_sysfs, dev);
  68. }
/*
 * Abort one pending fallback request: unlink it from the pending list and
 * mark its state aborted.  Callers hold fw_lock (see
 * kill_pending_fw_fallback_reqs(), firmware_loading_store(),
 * _request_firmware_load()).
 */
static void __fw_load_abort(struct fw_priv *fw_priv)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (fw_sysfs_done(fw_priv))
		return;
	list_del_init(&fw_priv->pending_list);
	fw_state_aborted(fw_priv);
}
  80. static void fw_load_abort(struct fw_sysfs *fw_sysfs)
  81. {
  82. struct fw_priv *fw_priv = fw_sysfs->fw_priv;
  83. __fw_load_abort(fw_priv);
  84. }
  85. static LIST_HEAD(pending_fw_head);
  86. void kill_pending_fw_fallback_reqs(bool only_kill_custom)
  87. {
  88. struct fw_priv *fw_priv;
  89. struct fw_priv *next;
  90. mutex_lock(&fw_lock);
  91. list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
  92. pending_list) {
  93. if (!fw_priv->need_uevent || !only_kill_custom)
  94. __fw_load_abort(fw_priv);
  95. }
  96. mutex_unlock(&fw_lock);
  97. }
  98. static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
  99. char *buf)
  100. {
  101. return sprintf(buf, "%d\n", __firmware_loading_timeout());
  102. }
  103. /**
  104. * firmware_timeout_store - set number of seconds to wait for firmware
  105. * @class: device class pointer
  106. * @attr: device attribute pointer
  107. * @buf: buffer to scan for timeout value
  108. * @count: number of bytes in @buf
  109. *
  110. * Sets the number of seconds to wait for the firmware. Once
  111. * this expires an error will be returned to the driver and no
  112. * firmware will be provided.
  113. *
  114. * Note: zero means 'wait forever'.
  115. **/
  116. static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
  117. const char *buf, size_t count)
  118. {
  119. int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
  120. if (tmp_loading_timeout < 0)
  121. tmp_loading_timeout = 0;
  122. __fw_fallback_set_timeout(tmp_loading_timeout);
  123. return count;
  124. }
/* 'timeout' class attribute, served by timeout_show()/timeout_store(). */
static CLASS_ATTR_RW(timeout);
static struct attribute *firmware_class_attrs[] = {
	&class_attr_timeout.attr,
	NULL,
};
/* Generates firmware_class_groups, referenced by firmware_class below. */
ATTRIBUTE_GROUPS(firmware_class);
  131. static void fw_dev_release(struct device *dev)
  132. {
  133. struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
  134. kfree(fw_sysfs);
  135. }
  136. static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
  137. {
  138. if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
  139. return -ENOMEM;
  140. if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
  141. return -ENOMEM;
  142. if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
  143. return -ENOMEM;
  144. return 0;
  145. }
  146. static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
  147. {
  148. struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
  149. int err = 0;
  150. mutex_lock(&fw_lock);
  151. if (fw_sysfs->fw_priv)
  152. err = do_firmware_uevent(fw_sysfs, env);
  153. mutex_unlock(&fw_lock);
  154. return err;
  155. }
/* /sys/class/firmware: hosts the per-request fallback loader devices. */
static struct class firmware_class = {
	.name = "firmware",
	.class_groups = firmware_class_groups,
	.dev_uevent = firmware_uevent,
	.dev_release = fw_dev_release,
};
  162. int register_sysfs_loader(void)
  163. {
  164. return class_register(&firmware_class);
  165. }
  166. void unregister_sysfs_loader(void)
  167. {
  168. class_unregister(&firmware_class);
  169. }
  170. static ssize_t firmware_loading_show(struct device *dev,
  171. struct device_attribute *attr, char *buf)
  172. {
  173. struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
  174. int loading = 0;
  175. mutex_lock(&fw_lock);
  176. if (fw_sysfs->fw_priv)
  177. loading = fw_sysfs_loading(fw_sysfs->fw_priv);
  178. mutex_unlock(&fw_lock);
  179. return sprintf(buf, "%d\n", loading);
  180. }
  181. /* Some architectures don't have PAGE_KERNEL_RO */
  182. #ifndef PAGE_KERNEL_RO
  183. #define PAGE_KERNEL_RO PAGE_KERNEL
  184. #endif
  185. /* one pages buffer should be mapped/unmapped only once */
  186. static int map_fw_priv_pages(struct fw_priv *fw_priv)
  187. {
  188. if (!fw_priv->is_paged_buf)
  189. return 0;
  190. vunmap(fw_priv->data);
  191. fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
  192. PAGE_KERNEL_RO);
  193. if (!fw_priv->data)
  194. return -ENOMEM;
  195. return 0;
  196. }
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 * 1: Start a load, discarding any previous partial load.
 * 0: Conclude the load and hand the data to the driver code.
 * -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	/* Nothing to do if the request was already aborted elsewhere. */
	if (fw_state_is_aborted(fw_priv))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_sysfs_done(fw_priv)) {
			/* Free every page written so far and reset the array. */
			for (i = 0; i < fw_priv->nr_pages; i++)
				__free_page(fw_priv->pages[i]);
			vfree(fw_priv->pages);
			fw_priv->pages = NULL;
			fw_priv->page_array_size = 0;
			fw_priv->nr_pages = 0;
			fw_state_start(fw_priv);
		}
		break;
	case 0:
		if (fw_sysfs_loading(fw_priv)) {
			int rc;

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			rc = map_fw_priv_pages(fw_priv);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				/* Give the LSM a chance to veto the image. */
				rc = security_kernel_post_read_file(NULL,
						fw_priv->data, fw_priv->size,
						READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_priv->pending_list);
			if (rc) {
				fw_state_aborted(fw_priv);
				written = rc;
			} else {
				fw_state_done(fw_priv);
			}
			break;
		}
		/* Writing 0 while not loading is an error; fall through. */
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_sysfs);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}
  278. static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
  279. static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
  280. loff_t offset, size_t count, bool read)
  281. {
  282. if (read)
  283. memcpy(buffer, fw_priv->data + offset, count);
  284. else
  285. memcpy(fw_priv->data + offset, buffer, count);
  286. }
  287. static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
  288. loff_t offset, size_t count, bool read)
  289. {
  290. while (count) {
  291. void *page_data;
  292. int page_nr = offset >> PAGE_SHIFT;
  293. int page_ofs = offset & (PAGE_SIZE-1);
  294. int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
  295. page_data = kmap(fw_priv->pages[page_nr]);
  296. if (read)
  297. memcpy(buffer, page_data + page_ofs, page_cnt);
  298. else
  299. memcpy(page_data + page_ofs, buffer, page_cnt);
  300. kunmap(fw_priv->pages[page_nr]);
  301. buffer += page_cnt;
  302. offset += page_cnt;
  303. count -= page_cnt;
  304. }
  305. }
  306. static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
  307. struct bin_attribute *bin_attr,
  308. char *buffer, loff_t offset, size_t count)
  309. {
  310. struct device *dev = kobj_to_dev(kobj);
  311. struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
  312. struct fw_priv *fw_priv;
  313. ssize_t ret_count;
  314. mutex_lock(&fw_lock);
  315. fw_priv = fw_sysfs->fw_priv;
  316. if (!fw_priv || fw_sysfs_done(fw_priv)) {
  317. ret_count = -ENODEV;
  318. goto out;
  319. }
  320. if (offset > fw_priv->size) {
  321. ret_count = 0;
  322. goto out;
  323. }
  324. if (count > fw_priv->size - offset)
  325. count = fw_priv->size - offset;
  326. ret_count = count;
  327. if (fw_priv->data)
  328. firmware_rw_data(fw_priv, buffer, offset, count, true);
  329. else
  330. firmware_rw(fw_priv, buffer, offset, count, true);
  331. out:
  332. mutex_unlock(&fw_lock);
  333. return ret_count;
  334. }
  335. static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
  336. {
  337. struct fw_priv *fw_priv= fw_sysfs->fw_priv;
  338. int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
  339. /* If the array of pages is too small, grow it... */
  340. if (fw_priv->page_array_size < pages_needed) {
  341. int new_array_size = max(pages_needed,
  342. fw_priv->page_array_size * 2);
  343. struct page **new_pages;
  344. new_pages = vmalloc(new_array_size * sizeof(void *));
  345. if (!new_pages) {
  346. fw_load_abort(fw_sysfs);
  347. return -ENOMEM;
  348. }
  349. memcpy(new_pages, fw_priv->pages,
  350. fw_priv->page_array_size * sizeof(void *));
  351. memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
  352. (new_array_size - fw_priv->page_array_size));
  353. vfree(fw_priv->pages);
  354. fw_priv->pages = new_pages;
  355. fw_priv->page_array_size = new_array_size;
  356. }
  357. while (fw_priv->nr_pages < pages_needed) {
  358. fw_priv->pages[fw_priv->nr_pages] =
  359. alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
  360. if (!fw_priv->pages[fw_priv->nr_pages]) {
  361. fw_load_abort(fw_sysfs);
  362. return -ENOMEM;
  363. }
  364. fw_priv->nr_pages++;
  365. }
  366. return 0;
  367. }
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 *
 * Requires CAP_SYS_RAWIO; returns -EPERM otherwise.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	/* Reject writes once the load has concluded or the request is gone. */
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		retval = -ENODEV;
		goto out;
	}

	if (fw_priv->data) {
		/* Preallocated buffer: the write must fit entirely. */
		if (offset + count > fw_priv->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_data(fw_priv, buffer, offset, count, false);
		retval = count;
	} else {
		/* Paged buffer: grow on demand to cover the write. */
		retval = fw_realloc_pages(fw_sysfs, offset + count);
		if (retval)
			goto out;
		retval = count;
		firmware_rw(fw_priv, buffer, offset, count, false);
	}

	/* Track the high-water mark as the effective firmware size. */
	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
/* The 'data' binary attribute: userspace streams the firmware image here. */
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};
/* Plain attributes of the fallback device (the 'loading' control file). */
static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};
static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};
static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};
/* Attached via f_dev->groups in fw_create_instance(). */
static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};
  437. static struct fw_sysfs *
  438. fw_create_instance(struct firmware *firmware, const char *fw_name,
  439. struct device *device, unsigned int opt_flags)
  440. {
  441. struct fw_sysfs *fw_sysfs;
  442. struct device *f_dev;
  443. fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
  444. if (!fw_sysfs) {
  445. fw_sysfs = ERR_PTR(-ENOMEM);
  446. goto exit;
  447. }
  448. fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
  449. fw_sysfs->fw = firmware;
  450. f_dev = &fw_sysfs->dev;
  451. device_initialize(f_dev);
  452. dev_set_name(f_dev, "%s", fw_name);
  453. f_dev->parent = device;
  454. f_dev->class = &firmware_class;
  455. f_dev->groups = fw_dev_attr_groups;
  456. exit:
  457. return fw_sysfs;
  458. }
/*
 * Load a firmware image via the sysfs user helper.
 *
 * Publishes the fallback device, optionally emits the ADD uevent so udev
 * can service the request, then waits (up to @timeout jiffies) for
 * userspace to complete or abort the load through the 'loading'/'data'
 * attributes.  Returns 0 on success, -EINTR/-EAGAIN when the request was
 * aborted, -ENOMEM if the paged buffer ended up unmapped, or the
 * device_add() error.
 */
static int _request_firmware_load(struct fw_sysfs *fw_sysfs,
				  unsigned int opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_sysfs->dev;
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	/* fall back on userspace loading */
	if (!fw_priv->data)
		fw_priv->is_paged_buf = true;

	/* Hold back the ADD uevent until we know it should be sent. */
	dev_set_uevent_suppress(f_dev, true);
	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	/* Make the request visible to kill_pending_fw_fallback_reqs(). */
	mutex_lock(&fw_lock);
	list_add(&fw_priv->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		fw_priv->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
		kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
	} else {
		/* No uevent: a custom helper may take arbitrarily long. */
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
	if (retval < 0) {
		/* Timed out or interrupted: tear the request down. */
		mutex_lock(&fw_lock);
		fw_load_abort(fw_sysfs);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(fw_priv)) {
		/* Map a signal-interrupted wait to -EINTR, anything else to -EAGAIN. */
		if (retval == -ERESTARTSYS)
			retval = -EINTR;
		else
			retval = -EAGAIN;
	} else if (fw_priv->is_paged_buf && !fw_priv->data)
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
/*
 * Entry point for the user-helper load: take the usermodehelper read
 * lock (waiting for it only for FW_OPT_NOWAIT requests), create the
 * sysfs fallback device, run the load, and on success assign the result
 * to @firmware via assign_fw().
 */
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    unsigned int opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		/*
		 * Async requests may sleep waiting for the umh lock,
		 * consuming (part of) the loading timeout in the process.
		 */
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		/* Sync requests must not sleep; helpers may be disabled. */
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_sysfs)) {
		ret = PTR_ERR(fw_sysfs);
		goto out_unlock;
	}

	fw_sysfs->fw_priv = firmware->priv;
	ret = _request_firmware_load(fw_sysfs, opt_flags, timeout);
	if (!ret)
		ret = assign_fw(firmware, device, opt_flags);

out_unlock:
	usermodehelper_read_unlock();
	return ret;
}
  540. static bool fw_force_sysfs_fallback(unsigned int opt_flags)
  541. {
  542. if (fw_fallback_config.force_sysfs_fallback)
  543. return true;
  544. if (!(opt_flags & FW_OPT_USERHELPER))
  545. return false;
  546. return true;
  547. }
  548. static bool fw_run_sysfs_fallback(unsigned int opt_flags)
  549. {
  550. if (fw_fallback_config.ignore_sysfs_fallback) {
  551. pr_info_once("Ignoring firmware sysfs fallback due to debugfs knob\n");
  552. return false;
  553. }
  554. if ((opt_flags & FW_OPT_NOFALLBACK))
  555. return false;
  556. return fw_force_sysfs_fallback(opt_flags);
  557. }
  558. int fw_sysfs_fallback(struct firmware *fw, const char *name,
  559. struct device *device,
  560. unsigned int opt_flags,
  561. int ret)
  562. {
  563. if (!fw_run_sysfs_fallback(opt_flags))
  564. return ret;
  565. dev_warn(device, "Falling back to user helper\n");
  566. return fw_load_from_user_helper(fw, name, device, opt_flags);
  567. }