firmware_class.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347
  1. /*
  2. * firmware_class.c - Multi purpose firmware loading support
  3. *
  4. * Copyright (c) 2003 Manuel Estrada Sainz
  5. *
  6. * Please see Documentation/firmware_class/ for more information.
  7. *
  8. */
  9. #include <linux/capability.h>
  10. #include <linux/device.h>
  11. #include <linux/module.h>
  12. #include <linux/init.h>
  13. #include <linux/timer.h>
  14. #include <linux/vmalloc.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/bitops.h>
  17. #include <linux/mutex.h>
  18. #include <linux/workqueue.h>
  19. #include <linux/highmem.h>
  20. #include <linux/firmware.h>
  21. #include <linux/slab.h>
  22. #include <linux/sched.h>
  23. #include <linux/list.h>
  24. #include <linux/async.h>
  25. #include <linux/pm.h>
  26. #include <linux/suspend.h>
  27. #include <linux/syscore_ops.h>
  28. #include "base.h"
  29. MODULE_AUTHOR("Manuel Estrada Sainz");
  30. MODULE_DESCRIPTION("Multi purpose firmware loading support");
  31. MODULE_LICENSE("GPL");
  32. /* Builtin firmware support */
#ifdef CONFIG_FW_LOADER

/* Linker-script symbols bracketing the .builtin_fw section. */
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

/*
 * Look up @name among the firmware images linked into the kernel.
 * On a match, point @fw at the builtin data (no copy is made) and
 * return true; otherwise return false.
 */
static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;
			return true;
		}
	}

	return false;
}

/*
 * Return true if @fw->data points into the builtin firmware section,
 * i.e. the data belongs to the kernel image and must not be freed.
 */
static bool fw_is_builtin_firmware(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}

#else /* Module case - no builtin firmware support */

static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	return false;
}

static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif
/* Bits in firmware_buf->status tracking the state of a userspace load. */
enum {
	FW_STATUS_LOADING,
	FW_STATUS_DONE,
	FW_STATUS_ABORT,
};

/* How long to wait for userspace to supply an image; 0 = wait forever. */
static int loading_timeout = 60;	/* In seconds */

/* Timeout in jiffies, or MAX_SCHEDULE_TIMEOUT when disabled (<= 0). */
static inline long firmware_loading_timeout(void)
{
	return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}
/*
 * Global cache of firmware images, shared by all requests so that
 * concurrent loads of the same image end up using a single buffer.
 */
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;

	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	int state;			/* FW_LOADER_NO_CACHE / FW_LOADER_START_CACHE */
	wait_queue_head_t wait_queue;
	int cnt;
	struct delayed_work work;

	struct notifier_block pm_notify;
};

/* Refcounted, page-backed image shared by every requester of one name. */
struct firmware_buf {
	struct kref ref;
	struct list_head list;		/* link in firmware_cache.head */
	struct completion completion;	/* completed when load finishes/aborts */
	struct firmware_cache *fwc;
	unsigned long status;		/* FW_STATUS_* bits */
	void *data;			/* vmap of pages[], read-only */
	size_t size;
	struct page **pages;
	int nr_pages;
	int page_array_size;
	char fw_id[];			/* firmware name (flexible array) */
};

/* One cached firmware name, linked in firmware_cache.fw_names. */
struct fw_cache_entry {
	struct list_head list;
	char name[];
};

/* Per-request state backing the transient sysfs loader device. */
struct firmware_priv {
	struct timer_list timeout;	/* aborts the load if userspace stalls */
	bool nowait;			/* set for request_firmware_nowait() */
	struct device dev;
	struct firmware_buf *buf;
	struct firmware *fw;
};

/* devres payload recording which firmware image a device consumed. */
struct fw_name_devm {
	unsigned long magic;		/* &fw_cache; guards against foreign devres */
	char name[];
};

#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)

#define	FW_LOADER_NO_CACHE	0
#define	FW_LOADER_START_CACHE	1

static int fw_cache_piggyback_on_request(const char *name);

/* fw_lock could be moved to 'struct firmware_priv' but since it is just
 * guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

static struct firmware_cache fw_cache;
  130. static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
  131. struct firmware_cache *fwc)
  132. {
  133. struct firmware_buf *buf;
  134. buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1 , GFP_ATOMIC);
  135. if (!buf)
  136. return buf;
  137. kref_init(&buf->ref);
  138. strcpy(buf->fw_id, fw_name);
  139. buf->fwc = fwc;
  140. init_completion(&buf->completion);
  141. pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
  142. return buf;
  143. }
  144. static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
  145. {
  146. struct firmware_buf *tmp;
  147. struct firmware_cache *fwc = &fw_cache;
  148. list_for_each_entry(tmp, &fwc->head, list)
  149. if (!strcmp(tmp->fw_id, fw_name))
  150. return tmp;
  151. return NULL;
  152. }
/*
 * Find an existing firmware_buf for @fw_name or allocate a new one.
 *
 * Returns 1 with a referenced existing buf in *@buf when the image is
 * already cached or currently loading, 0 with a newly allocated buf
 * linked into the cache list, or -ENOMEM.  Lookup and insertion happen
 * under fwc->lock so concurrent requests for one name share one buf.
 */
static int fw_lookup_and_allocate_buf(const char *fw_name,
				      struct firmware_cache *fwc,
				      struct firmware_buf **buf)
{
	struct firmware_buf *tmp;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);	/* extra reference for this requester */
		spin_unlock(&fwc->lock);
		*buf = tmp;
		return 1;
	}
	tmp = __allocate_fw_buf(fw_name, fwc);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*buf = tmp;

	return tmp ? 0 : -ENOMEM;
}
  173. static struct firmware_buf *fw_lookup_buf(const char *fw_name)
  174. {
  175. struct firmware_buf *tmp;
  176. struct firmware_cache *fwc = &fw_cache;
  177. spin_lock(&fwc->lock);
  178. tmp = __fw_lookup_buf(fw_name);
  179. spin_unlock(&fwc->lock);
  180. return tmp;
  181. }
  182. static void __fw_free_buf(struct kref *ref)
  183. {
  184. struct firmware_buf *buf = to_fwbuf(ref);
  185. struct firmware_cache *fwc = buf->fwc;
  186. int i;
  187. pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
  188. __func__, buf->fw_id, buf, buf->data,
  189. (unsigned int)buf->size);
  190. spin_lock(&fwc->lock);
  191. list_del(&buf->list);
  192. spin_unlock(&fwc->lock);
  193. vunmap(buf->data);
  194. for (i = 0; i < buf->nr_pages; i++)
  195. __free_page(buf->pages[i]);
  196. kfree(buf->pages);
  197. kfree(buf);
  198. }
static void fw_free_buf(struct firmware_buf *buf)
{
	/* last put frees pages, mapping and buf via __fw_free_buf() */
	kref_put(&buf->ref, __fw_free_buf);
}
/* Map the embedded struct device back to its firmware_priv container. */
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}
  207. static void fw_load_abort(struct firmware_priv *fw_priv)
  208. {
  209. struct firmware_buf *buf = fw_priv->buf;
  210. set_bit(FW_STATUS_ABORT, &buf->status);
  211. complete_all(&buf->completion);
  212. }
/* sysfs class attribute read: current loading timeout in seconds. */
static ssize_t firmware_timeout_show(struct class *class,
				     struct class_attribute *attr,
				     char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}
  219. /**
  220. * firmware_timeout_store - set number of seconds to wait for firmware
  221. * @class: device class pointer
  222. * @attr: device attribute pointer
  223. * @buf: buffer to scan for timeout value
  224. * @count: number of bytes in @buf
  225. *
  226. * Sets the number of seconds to wait for the firmware. Once
  227. * this expires an error will be returned to the driver and no
  228. * firmware will be provided.
  229. *
  230. * Note: zero means 'wait forever'.
  231. **/
  232. static ssize_t firmware_timeout_store(struct class *class,
  233. struct class_attribute *attr,
  234. const char *buf, size_t count)
  235. {
  236. loading_timeout = simple_strtol(buf, NULL, 10);
  237. if (loading_timeout < 0)
  238. loading_timeout = 0;
  239. return count;
  240. }
/* Class-level sysfs attributes: /sys/class/firmware/timeout */
static struct class_attribute firmware_class_attrs[] = {
	__ATTR(timeout, S_IWUSR | S_IRUGO,
		firmware_timeout_show, firmware_timeout_store),
	__ATTR_NULL
};
  246. static void fw_dev_release(struct device *dev)
  247. {
  248. struct firmware_priv *fw_priv = to_firmware_priv(dev);
  249. kfree(fw_priv);
  250. module_put(THIS_MODULE);
  251. }
  252. static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
  253. {
  254. struct firmware_priv *fw_priv = to_firmware_priv(dev);
  255. if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
  256. return -ENOMEM;
  257. if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
  258. return -ENOMEM;
  259. if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
  260. return -ENOMEM;
  261. return 0;
  262. }
/* The transient per-request loader devices are registered in this class. */
static struct class firmware_class = {
	.name		= "firmware",
	.class_attrs	= firmware_class_attrs,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};
  269. static ssize_t firmware_loading_show(struct device *dev,
  270. struct device_attribute *attr, char *buf)
  271. {
  272. struct firmware_priv *fw_priv = to_firmware_priv(dev);
  273. int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
  274. return sprintf(buf, "%d\n", loading);
  275. }
/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* fw->priv is the refcounted firmware_buf backing fw->data */
	WARN_ON(!fw->priv);
	fw_free_buf(fw->priv);
}

/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *  1: Start a load, discarding any previous partial load.
 *  0: Conclude the load and hand the data to the driver code.
 * -1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf = fw_priv->buf;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);

	/* buf is cleared once the request has completed or aborted */
	if (!fw_buf)
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			kfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			set_bit(FW_STATUS_LOADING, &fw_buf->status);
		}
		break;
	case 0:
		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
			set_bit(FW_STATUS_DONE, &fw_buf->status);
			clear_bit(FW_STATUS_LOADING, &fw_buf->status);
			/* wakes the requester in _request_firmware_load() */
			complete_all(&fw_buf->completion);
			break;
		}
		/* a '0' without a preceding '1' is an error: fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);

	return count;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
/*
 * sysfs 'data' read: copy out what userspace has written so far,
 * page by page (kmap copes with highmem pages).  Returns -ENODEV
 * once the load has concluded and the buf is no longer writable.
 */
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	/* clamp the read to the data actually written so far */
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(buffer, page_data + page_ofs, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
out:
	mutex_unlock(&fw_lock);
	return ret_count;
}
  380. static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
  381. {
  382. struct firmware_buf *buf = fw_priv->buf;
  383. int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
  384. /* If the array of pages is too small, grow it... */
  385. if (buf->page_array_size < pages_needed) {
  386. int new_array_size = max(pages_needed,
  387. buf->page_array_size * 2);
  388. struct page **new_pages;
  389. new_pages = kmalloc(new_array_size * sizeof(void *),
  390. GFP_KERNEL);
  391. if (!new_pages) {
  392. fw_load_abort(fw_priv);
  393. return -ENOMEM;
  394. }
  395. memcpy(new_pages, buf->pages,
  396. buf->page_array_size * sizeof(void *));
  397. memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
  398. (new_array_size - buf->page_array_size));
  399. kfree(buf->pages);
  400. buf->pages = new_pages;
  401. buf->page_array_size = new_array_size;
  402. }
  403. while (buf->nr_pages < pages_needed) {
  404. buf->pages[buf->nr_pages] =
  405. alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
  406. if (!buf->pages[buf->nr_pages]) {
  407. fw_load_abort(fw_priv);
  408. return -ENOMEM;
  409. }
  410. buf->nr_pages++;
  411. }
  412. return 0;
  413. }
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t retval;

	/* only privileged userspace may feed firmware into the kernel */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		retval = -ENODEV;
		goto out;
	}

	/* NOTE(review): offset + count narrows to int in fw_realloc_buffer's
	 * min_size parameter — presumably bounded by sysfs write sizes in
	 * practice, but worth confirming for very large offsets */
	retval = fw_realloc_buffer(fw_priv, offset + count);
	if (retval)
		goto out;

	retval = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}

	/* size grows to the furthest byte written; sparse rewrites keep it */
	buf->size = max_t(size_t, offset, buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
/* The binary 'data' attribute userspace writes the image through. */
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};
  469. static void firmware_class_timeout(u_long data)
  470. {
  471. struct firmware_priv *fw_priv = (struct firmware_priv *) data;
  472. fw_load_abort(fw_priv);
  473. }
  474. static struct firmware_priv *
  475. fw_create_instance(struct firmware *firmware, const char *fw_name,
  476. struct device *device, bool uevent, bool nowait)
  477. {
  478. struct firmware_priv *fw_priv;
  479. struct device *f_dev;
  480. fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
  481. if (!fw_priv) {
  482. dev_err(device, "%s: kmalloc failed\n", __func__);
  483. fw_priv = ERR_PTR(-ENOMEM);
  484. goto exit;
  485. }
  486. fw_priv->nowait = nowait;
  487. fw_priv->fw = firmware;
  488. setup_timer(&fw_priv->timeout,
  489. firmware_class_timeout, (u_long) fw_priv);
  490. f_dev = &fw_priv->dev;
  491. device_initialize(f_dev);
  492. dev_set_name(f_dev, "%s", fw_name);
  493. f_dev->parent = device;
  494. f_dev->class = &firmware_class;
  495. exit:
  496. return fw_priv;
  497. }
  498. /* one pages buffer is mapped/unmapped only once */
  499. static int fw_map_pages_buf(struct firmware_buf *buf)
  500. {
  501. buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
  502. if (!buf->data)
  503. return -ENOMEM;
  504. return 0;
  505. }
  506. /* store the pages buffer info firmware from buf */
  507. static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
  508. {
  509. fw->priv = buf;
  510. fw->pages = buf->pages;
  511. fw->size = buf->size;
  512. fw->data = buf->data;
  513. pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
  514. __func__, buf->fw_id, buf, buf->data,
  515. (unsigned int)buf->size);
  516. }
/*
 * devres release handler for a recorded firmware name.  The devres
 * allocation itself is freed by the devres core, and nothing else is
 * owned here, so this only logs; the magic check guards against being
 * invoked on a foreign resource.
 */
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
}
  524. static int fw_devm_match(struct device *dev, void *res,
  525. void *match_data)
  526. {
  527. struct fw_name_devm *fwn = res;
  528. return (fwn->magic == (unsigned long)&fw_cache) &&
  529. !strcmp(fwn->name, match_data);
  530. }
  531. static struct fw_name_devm *fw_find_devm_name(struct device *dev,
  532. const char *name)
  533. {
  534. struct fw_name_devm *fwn;
  535. fwn = devres_find(dev, fw_name_devm_release,
  536. fw_devm_match, (void *)name);
  537. return fwn;
  538. }
  539. /* add firmware name into devres list */
  540. static int fw_add_devm_name(struct device *dev, const char *name)
  541. {
  542. struct fw_name_devm *fwn;
  543. fwn = fw_find_devm_name(dev, name);
  544. if (fwn)
  545. return 1;
  546. fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
  547. strlen(name) + 1, GFP_KERNEL);
  548. if (!fwn)
  549. return -ENOMEM;
  550. fwn->magic = (unsigned long)&fw_cache;
  551. strcpy(fwn->name, name);
  552. devres_add(dev, fwn);
  553. return 0;
  554. }
/* Release a partially set up firmware and clear the caller's pointer. */
static void _request_firmware_cleanup(const struct firmware **firmware_p)
{
	release_firmware(*firmware_p);
	*firmware_p = NULL;
}
/*
 * First phase of a firmware request: allocate the struct firmware, try
 * the builtin images, then either join an in-flight/cached load or set
 * up a new sysfs loader instance.
 *
 * Returns:
 *   NULL      - *firmware_p already holds the image (builtin or cached)
 *   ERR_PTR   - request failed; *firmware_p has been cleaned up
 *   otherwise - a firmware_priv the caller must pass on to
 *               _request_firmware_load()
 */
static struct firmware_priv *
_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
			  struct device *device, bool uevent, bool nowait)
{
	struct firmware *firmware;
	struct firmware_priv *fw_priv = NULL;
	struct firmware_buf *buf;
	int ret;

	if (!firmware_p)
		return ERR_PTR(-EINVAL);

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	if (fw_get_builtin_firmware(firmware, name)) {
		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
		return NULL;
	}

	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
	if (!ret)
		/* brand-new buf (ret == 0): we drive the userspace load */
		fw_priv = fw_create_instance(firmware, name, device,
					     uevent, nowait);

	if (IS_ERR(fw_priv) || ret < 0) {
		kfree(firmware);
		*firmware_p = NULL;
		return ERR_PTR(-ENOMEM);
	} else if (fw_priv) {
		fw_priv->buf = buf;

		/*
		 * bind with 'buf' now to avoid warning in failure path
		 * of requesting firmware.
		 */
		firmware->priv = buf;
		return fw_priv;
	}

	/* share the cached buf, which is inprogessing or completed */
 check_status:
	mutex_lock(&fw_lock);
	if (test_bit(FW_STATUS_ABORT, &buf->status)) {
		fw_priv = ERR_PTR(-ENOENT);
		firmware->priv = buf;
		_request_firmware_cleanup(firmware_p);
		goto exit;
	} else if (test_bit(FW_STATUS_DONE, &buf->status)) {
		fw_priv = NULL;
		fw_set_page_data(buf, firmware);
		goto exit;
	}
	/* still loading elsewhere: drop the lock, wait, then re-check */
	mutex_unlock(&fw_lock);
	wait_for_completion(&buf->completion);
	goto check_status;

 exit:
	mutex_unlock(&fw_lock);
	return fw_priv;
}
/*
 * Second phase for a fresh request: publish the sysfs loader device,
 * wait for userspace to feed the image (or for timeout/abort), then
 * map the pages and hand the result to fw_priv->fw.
 *
 * Returns 0 on success or a negative errno; the sysfs device is torn
 * down on all paths via the goto ladder at the bottom.
 */
static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
				  long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;
	struct firmware_cache *fwc = &fw_cache;

	/* suppress the ADD uevent until the attributes are in place */
	dev_set_uevent_suppress(f_dev, true);

	/* Need to pin this module until class device is destroyed */
	__module_get(THIS_MODULE);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	retval = device_create_bin_file(f_dev, &firmware_attr_data);
	if (retval) {
		dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
		goto err_del_dev;
	}

	retval = device_create_file(f_dev, &dev_attr_loading);
	if (retval) {
		dev_err(f_dev, "%s: device_create_file failed\n", __func__);
		goto err_del_bin_attr;
	}

	if (uevent) {
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		if (timeout != MAX_SCHEDULE_TIMEOUT)
			mod_timer(&fw_priv->timeout,
				  round_jiffies_up(jiffies + timeout));

		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	}

	/* completed by firmware_loading_store() or fw_load_abort() */
	wait_for_completion(&buf->completion);

	del_timer_sync(&fw_priv->timeout);

	mutex_lock(&fw_lock);
	if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
		retval = -ENOENT;

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * f_dev->parent may has been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	if (!retval && f_dev->parent)
		fw_add_devm_name(f_dev->parent, buf->fw_id);

	if (!retval)
		retval = fw_map_pages_buf(buf);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!retval && fwc->state == FW_LOADER_START_CACHE) {
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw_priv->fw);

	fw_priv->buf = NULL;
	mutex_unlock(&fw_lock);

	device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
	device_remove_bin_file(f_dev, &firmware_attr_data);
err_del_dev:
	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * @firmware_p will be used to return a firmware image by the name
 * of @name for device @device.
 *
 * Should be called from user context where sleeping is allowed.
 *
 * @name will be used as $FIRMWARE in the uevent environment and
 * should be distinctive enough not to be confused with any other
 * firmware image for this or any other device.
 *
 * Caller must hold the reference count of @device.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
                 struct device *device)
{
	struct firmware_priv *fw_priv;
	int ret;

	fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
					    false);
	/* NULL: builtin/cached image already filled in; ERR_PTR: failure */
	if (IS_ERR_OR_NULL(fw_priv))
		return PTR_RET(fw_priv);

	ret = usermodehelper_read_trylock();
	if (WARN_ON(ret)) {
		/* userspace is not usable (e.g. during suspend/resume) */
		dev_err(device, "firmware: %s will not be loaded\n", name);
	} else {
		ret = _request_firmware_load(fw_priv, true,
					firmware_loading_timeout());
		usermodehelper_read_unlock();
	}
	if (ret)
		_request_firmware_cleanup(firmware_p);

	return ret;
}
  726. /**
  727. * release_firmware: - release the resource associated with a firmware image
  728. * @fw: firmware resource to release
  729. **/
  730. void release_firmware(const struct firmware *fw)
  731. {
  732. if (fw) {
  733. if (!fw_is_builtin_firmware(fw))
  734. firmware_free_data(fw);
  735. kfree(fw);
  736. }
  737. }
/* Async support */

/* State carried by one request_firmware_nowait() work item. */
struct firmware_work {
	struct work_struct work;
	struct module *module;		/* requester, pinned for the duration */
	const char *name;
	struct device *device;
	void *context;			/* opaque cookie handed back to @cont */
	void (*cont)(const struct firmware *fw, void *context);
	bool uevent;
};
/*
 * Worker for request_firmware_nowait(): runs the blocking request,
 * always invokes the completion callback, then drops the device and
 * module references taken at submission time and frees the work item.
 */
static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;
	struct firmware_priv *fw_priv;
	long timeout;
	int ret;

	fw_work = container_of(work, struct firmware_work, work);
	fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
			fw_work->uevent, true);
	if (IS_ERR_OR_NULL(fw_priv)) {
		ret = PTR_RET(fw_priv);
		goto out;
	}

	/* wait (up to the loading timeout) for usermode helpers to be usable */
	timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
	if (timeout) {
		ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
		usermodehelper_read_unlock();
	} else {
		dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
			fw_work->name);
		ret = -EAGAIN;
	}
	if (ret)
		_request_firmware_cleanup(&fw);

 out:
	/* fw is NULL on failure; the callback must handle that */
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device);

	module_put(fw_work->module);
	kfree(fw_work);
}
/**
 * request_firmware_nowait - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 * Caller must hold the reference count of @device.
 *
 * Asynchronous variant of request_firmware() for user contexts:
 *	- sleep for as small periods as possible since it may
 *	increase kernel boot time of built-in device drivers
 *	requesting firmware in their ->probe() methods, if
 *	@gfp is GFP_KERNEL.
 *
 *	- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof (struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = name;
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->uevent = uevent;

	/* pin the requesting module until the worker has run @cont */
	if (!try_module_get(module)) {
		kfree(fw_work);
		return -EFAULT;
	}

	/* reference dropped by request_firmware_work_func() */
	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}
  827. /**
  828. * cache_firmware - cache one firmware image in kernel memory space
  829. * @fw_name: the firmware image name
  830. *
  831. * Cache firmware in kernel memory so that drivers can use it when
  832. * system isn't ready for them to request firmware image from userspace.
  833. * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interaction
 * with userspace
  836. *
  837. * Return 0 if the firmware image has been cached successfully
  838. * Return !0 otherwise
  839. *
  840. */
int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	/* a successful request loads the image into kernel memory */
	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		/*
		 * Deliberately kfree() only the struct firmware wrapper
		 * rather than calling release_firmware(): presumably the
		 * underlying buffer keeps its reference so the image stays
		 * cached for later lookups — NOTE(review): confirm against
		 * release_firmware()/fw_free_buf() semantics.
		 */
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
  852. /**
  853. * uncache_firmware - remove one cached firmware image
  854. * @fw_name: the firmware image name
  855. *
  856. * Uncache one firmware image which has been cached successfully
  857. * before.
  858. *
  859. * Return 0 if the firmware cache has been removed successfully
  860. * Return !0 otherwise
  861. *
  862. */
  863. int uncache_firmware(const char *fw_name)
  864. {
  865. struct firmware_buf *buf;
  866. struct firmware fw;
  867. pr_debug("%s: %s\n", __func__, fw_name);
  868. if (fw_get_builtin_firmware(&fw, fw_name))
  869. return 0;
  870. buf = fw_lookup_buf(fw_name);
  871. if (buf) {
  872. fw_free_buf(buf);
  873. return 0;
  874. }
  875. return -EINVAL;
  876. }
  877. static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
  878. {
  879. struct fw_cache_entry *fce;
  880. fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
  881. if (!fce)
  882. goto exit;
  883. strcpy(fce->name, name);
  884. exit:
  885. return fce;
  886. }
  887. static int fw_cache_piggyback_on_request(const char *name)
  888. {
  889. struct firmware_cache *fwc = &fw_cache;
  890. struct fw_cache_entry *fce;
  891. int ret = 0;
  892. spin_lock(&fwc->name_lock);
  893. list_for_each_entry(fce, &fwc->fw_names, list) {
  894. if (!strcmp(fce->name, name))
  895. goto found;
  896. }
  897. fce = alloc_fw_cache_entry(name);
  898. if (fce) {
  899. ret = 1;
  900. list_add(&fce->list, &fwc->fw_names);
  901. pr_debug("%s: fw: %s\n", __func__, name);
  902. }
  903. found:
  904. spin_unlock(&fwc->name_lock);
  905. return ret;
  906. }
/* counterpart of alloc_fw_cache_entry() */
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree(fce);
}
/*
 * Async worker scheduled by dev_cache_fw_image(): cache one image named
 * by @fw_entry (a struct fw_cache_entry).  On failure the entry is
 * unlinked and freed; in all cases the in-flight counter is dropped and
 * the waiter in device_cache_fw_images() is woken.
 */
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		/* caching failed: remove the stale name from fw_names */
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}

	/* one fewer outstanding cache request */
	spin_lock(&fwc->name_lock);
	fwc->cnt--;
	spin_unlock(&fwc->name_lock);

	wake_up(&fwc->wait_queue);
}
  929. /* called with dev->devres_lock held */
  930. static void dev_create_fw_entry(struct device *dev, void *res,
  931. void *data)
  932. {
  933. struct fw_name_devm *fwn = res;
  934. const char *fw_name = fwn->name;
  935. struct list_head *head = data;
  936. struct fw_cache_entry *fce;
  937. fce = alloc_fw_cache_entry(fw_name);
  938. if (fce)
  939. list_add(&fce->list, head);
  940. }
  941. static int devm_name_match(struct device *dev, void *res,
  942. void *match_data)
  943. {
  944. struct fw_name_devm *fwn = res;
  945. return (fwn->magic == (unsigned long)match_data);
  946. }
/*
 * For one device: collect every firmware name recorded in its devres
 * list and kick off an async caching request for each of them.
 */
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	/* gather names (under devres_lock) onto the local todo list */
	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		/* account the request before scheduling it, so the waiter
		 * in device_cache_fw_images() cannot miss the increment */
		spin_lock(&fwc->name_lock);
		fwc->cnt++;
		list_add(&fce->list, &fwc->fw_names);
		spin_unlock(&fwc->name_lock);

		async_schedule(__async_dev_cache_fw_image, (void *)fce);
	}
}
/*
 * Drain fwc->fw_names, uncaching every image that was cached for
 * system sleep.
 */
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);

		/* drop the spinlock around uncache_firmware(); presumably
		 * that path can sleep — NOTE(review): confirm */
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		/* re-acquire and re-check emptiness from the top */
		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
  981. /**
  982. * device_cache_fw_images - cache devices' firmware
  983. *
  984. * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
  986. * device's devres link list, so device_cache_fw_images can call
  987. * cache_firmware() to cache these firmwares for the device,
  988. * then the device driver can load its firmwares easily at
  989. * time when system is not ready to complete loading firmware.
  990. */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;
	DEFINE_WAIT(wait);

	pr_debug("%s\n", __func__);

	/*
	 * use small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at least once, also system is ready for
	 * completing firmware loading now. The maximum size of
	 * firmware in current distributions is about 2M bytes,
	 * so 10 secs should be enough.
	 */
	old_timeout = loading_timeout;
	loading_timeout = 10;

	/* fw_lock guards fwc->state against concurrent loaders */
	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices:
	 * fwc->cnt counts async requests still in flight; each worker
	 * decrements it and wakes fwc->wait_queue when done */
	spin_lock(&fwc->name_lock);
	for (;;) {
		prepare_to_wait(&fwc->wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!fwc->cnt)
			break;

		spin_unlock(&fwc->name_lock);

		schedule();

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
	finish_wait(&fwc->wait_queue, &wait);

	/* restore the caller-visible timeout */
	loading_timeout = old_timeout;
}
  1026. /**
  1027. * device_uncache_fw_images - uncache devices' firmware
  1028. *
 * uncache all firmware images which have been cached successfully
 * by device_cache_fw_images earlier
  1031. */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	/* thin logging wrapper around the real drain loop */
	__device_uncache_fw_images();
}
/* delayed-work handler: runs off fw_cache.work after resume */
static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
  1041. /**
  1042. * device_uncache_fw_images_delay - uncache devices firmwares
  1043. * @delay: number of milliseconds to delay uncache device firmwares
  1044. *
 * uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
  1047. */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	/* fw_cache.work invokes device_uncache_fw_images_work() */
	schedule_delayed_work(&fw_cache.work,
			      msecs_to_jiffies(delay));
}
#ifdef CONFIG_PM
/*
 * PM notifier: cache all devices' firmware before entering system
 * sleep, and schedule uncaching a while after resume.
 */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* userspace is still up: snapshot firmware images now */
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		/* keep the cache for a 10 s grace period after resume */
		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
#else
/* !CONFIG_PM: no caching around system sleep */
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	return 0;
}
#endif
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

/* hooked into syscore so fw_suspend() runs late in the suspend path */
static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
/* one-time boot-time setup of the global firmware cache state */
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);

	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);
	/* no async cache requests in flight yet */
	fw_cache.cnt = 0;
	fw_cache.state = FW_LOADER_NO_CACHE;
	init_waitqueue_head(&fw_cache.wait_queue);
	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	/* hook PM transitions and syscore suspend after state is ready */
	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
}
/* module init: set up the cache, then register the firmware class */
static int __init firmware_class_init(void)
{
	fw_cache_init();
	return class_register(&firmware_class);
}
static void __exit firmware_class_exit(void)
{
	/* tear down in reverse order of firmware_class_init() */
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
	class_unregister(&firmware_class);
}
/* fs_initcall: early enough for built-in drivers requesting firmware */
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);

EXPORT_SYMBOL(release_firmware);
EXPORT_SYMBOL(request_firmware);
EXPORT_SYMBOL(request_firmware_nowait);
EXPORT_SYMBOL_GPL(cache_firmware);
EXPORT_SYMBOL_GPL(uncache_firmware);