/*
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt) "pstore: " fmt

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
#include <linux/zlib.h>
#endif
#ifdef CONFIG_PSTORE_LZO_COMPRESS
#include <linux/lzo.h>
#endif
#ifdef CONFIG_PSTORE_LZ4_COMPRESS
#include <linux/lz4.h>
#endif
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#include "internal.h"

/*
 * We defer making "oops" entries appear in pstore - see
 * whether the system is actually still running well enough
 * to let someone see the entry
 */
static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
                 "(default is -1, which means runtime updates are disabled; "
                 "enabling this option is not safe, it may lead to further "
                 "corruption on Oopses)");
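
/*
 * Usage note (illustrative, not from this file): because of the
 * module_param_named() above, runtime updates can be enabled by writing a
 * period in milliseconds to the parameter, e.g.
 *
 *        echo 1000 > /sys/module/pstore/parameters/update_ms
 *
 * The sysfs path follows the usual /sys/module/<module>/parameters/ layout;
 * the value 1000 is only an example, and the warning above still applies:
 * enabling runtime updates is not considered safe after an Oops.
 */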
static int pstore_new_entry;

static void pstore_timefunc(unsigned long);
static DEFINE_TIMER(pstore_timer, pstore_timefunc, 0, 0);

static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);

/*
 * pstore_lock just protects "psinfo" during
 * calls to pstore_register()
 */
static DEFINE_SPINLOCK(pstore_lock);
struct pstore_info *psinfo;

static char *backend;

/* Compression parameters */
#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
#define COMPR_LEVEL 6
#define WINDOW_BITS 12
#define MEM_LEVEL 4
static struct z_stream_s stream;
#else
static unsigned char *workspace;
#endif

struct pstore_zbackend {
        int (*compress)(const void *in, void *out, size_t inlen, size_t outlen);
        int (*decompress)(void *in, void *out, size_t inlen, size_t outlen);
        void (*allocate)(void);
        void (*free)(void);

        const char *name;
};

static char *big_oops_buf;
static size_t big_oops_buf_sz;

/* How much of the console log to snapshot */
unsigned long kmsg_bytes = PSTORE_DEFAULT_KMSG_BYTES;

void pstore_set_kmsg_bytes(int bytes)
{
        kmsg_bytes = bytes;
}

/* Tag each group of saved records with a sequence number */
static int oopscount;

static const char *get_reason_str(enum kmsg_dump_reason reason)
{
        switch (reason) {
        case KMSG_DUMP_PANIC:
                return "Panic";
        case KMSG_DUMP_OOPS:
                return "Oops";
        case KMSG_DUMP_EMERG:
                return "Emergency";
        case KMSG_DUMP_RESTART:
                return "Restart";
        case KMSG_DUMP_HALT:
                return "Halt";
        case KMSG_DUMP_POWEROFF:
                return "Poweroff";
        default:
                return "Unknown";
        }
}

bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
{
        /*
         * In case of NMI path, pstore shouldn't be blocked
         * regardless of reason.
         */
        if (in_nmi())
                return true;

        switch (reason) {
        /* In panic case, other cpus are stopped by smp_send_stop(). */
        case KMSG_DUMP_PANIC:
        /* Emergency restart shouldn't be blocked by spin lock. */
        case KMSG_DUMP_EMERG:
                return true;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(pstore_cannot_block_path);

#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
/* Derived from logfs_compress() */
static int compress_zlib(const void *in, void *out, size_t inlen, size_t outlen)
{
        int err, ret;

        ret = -EIO;
        err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
                                MEM_LEVEL, Z_DEFAULT_STRATEGY);
        if (err != Z_OK)
                goto error;

        stream.next_in = in;
        stream.avail_in = inlen;
        stream.total_in = 0;
        stream.next_out = out;
        stream.avail_out = outlen;
        stream.total_out = 0;

        err = zlib_deflate(&stream, Z_FINISH);
        if (err != Z_STREAM_END)
                goto error;

        err = zlib_deflateEnd(&stream);
        if (err != Z_OK)
                goto error;

        if (stream.total_out >= stream.total_in)
                goto error;

        ret = stream.total_out;
error:
        return ret;
}

/* Derived from logfs_uncompress */
static int decompress_zlib(void *in, void *out, size_t inlen, size_t outlen)
{
        int err, ret;

        ret = -EIO;
        err = zlib_inflateInit2(&stream, WINDOW_BITS);
        if (err != Z_OK)
                goto error;

        stream.next_in = in;
        stream.avail_in = inlen;
        stream.total_in = 0;
        stream.next_out = out;
        stream.avail_out = outlen;
        stream.total_out = 0;

        err = zlib_inflate(&stream, Z_FINISH);
        if (err != Z_STREAM_END)
                goto error;

        err = zlib_inflateEnd(&stream);
        if (err != Z_OK)
                goto error;

        ret = stream.total_out;
error:
        return ret;
}

static void allocate_zlib(void)
{
        size_t size;
        size_t cmpr;

        switch (psinfo->bufsize) {
        /* buffer range for efivars */
        case 1000 ... 2000:
                cmpr = 56;
                break;
        case 2001 ... 3000:
                cmpr = 54;
                break;
        case 3001 ... 3999:
                cmpr = 52;
                break;
        /* buffer range for nvram, erst */
        case 4000 ... 10000:
                cmpr = 45;
                break;
        default:
                cmpr = 60;
                break;
        }

        big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr;
        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
        if (big_oops_buf) {
                size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
                           zlib_inflate_workspacesize());
                stream.workspace = kmalloc(size, GFP_KERNEL);
                if (!stream.workspace) {
                        pr_err("No memory for compression workspace; skipping compression\n");
                        kfree(big_oops_buf);
                        big_oops_buf = NULL;
                }
        } else {
                pr_err("No memory for uncompressed data; skipping compression\n");
                stream.workspace = NULL;
        }
}
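
/*
 * Worked example of the sizing above (illustrative numbers): for a
 * 4096-byte nvram/erst record buffer, cmpr = 45, so
 *
 *        big_oops_buf_sz = (4096 * 100) / 45 = 9102 bytes
 *
 * i.e. roughly 2.2 times the record size of uncompressed text is captured,
 * on the assumption that zlib shrinks it to about 45% of its original size
 * so that the compressed result still fits in psinfo->buf.
 */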
static void free_zlib(void)
{
        kfree(stream.workspace);
        stream.workspace = NULL;
        kfree(big_oops_buf);
        big_oops_buf = NULL;
        big_oops_buf_sz = 0;
}

static const struct pstore_zbackend backend_zlib = {
        .compress = compress_zlib,
        .decompress = decompress_zlib,
        .allocate = allocate_zlib,
        .free = free_zlib,
        .name = "zlib",
};
#endif

#ifdef CONFIG_PSTORE_LZO_COMPRESS
static int compress_lzo(const void *in, void *out, size_t inlen, size_t outlen)
{
        int ret;

        ret = lzo1x_1_compress(in, inlen, out, &outlen, workspace);
        if (ret != LZO_E_OK) {
                pr_err("lzo_compress error, ret = %d!\n", ret);
                return -EIO;
        }

        return outlen;
}

static int decompress_lzo(void *in, void *out, size_t inlen, size_t outlen)
{
        int ret;

        ret = lzo1x_decompress_safe(in, inlen, out, &outlen);
        if (ret != LZO_E_OK) {
                pr_err("lzo_decompress error, ret = %d!\n", ret);
                return -EIO;
        }

        return outlen;
}

static void allocate_lzo(void)
{
        big_oops_buf_sz = lzo1x_worst_compress(psinfo->bufsize);
        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
        if (big_oops_buf) {
                workspace = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
                if (!workspace) {
                        pr_err("No memory for compression workspace; skipping compression\n");
                        kfree(big_oops_buf);
                        big_oops_buf = NULL;
                }
        } else {
                pr_err("No memory for uncompressed data; skipping compression\n");
                workspace = NULL;
        }
}

static void free_lzo(void)
{
        kfree(workspace);
        kfree(big_oops_buf);
        big_oops_buf = NULL;
        big_oops_buf_sz = 0;
}

static const struct pstore_zbackend backend_lzo = {
        .compress = compress_lzo,
        .decompress = decompress_lzo,
        .allocate = allocate_lzo,
        .free = free_lzo,
        .name = "lzo",
};
#endif

#ifdef CONFIG_PSTORE_LZ4_COMPRESS
static int compress_lz4(const void *in, void *out, size_t inlen, size_t outlen)
{
        int ret;

        ret = LZ4_compress_default(in, out, inlen, outlen, workspace);
        if (!ret) {
                pr_err("LZ4_compress_default error; compression failed!\n");
                return -EIO;
        }

        return ret;
}

static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen)
{
        int ret;

        ret = LZ4_decompress_safe(in, out, inlen, outlen);
        if (ret < 0) {
                /*
                 * LZ4_decompress_safe will return an error code
                 * (< 0) if decompression failed
                 */
                pr_err("LZ4_decompress_safe error, ret = %d!\n", ret);
                return -EIO;
        }

        return ret;
}

static void allocate_lz4(void)
{
        big_oops_buf_sz = LZ4_compressBound(psinfo->bufsize);
        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
        if (big_oops_buf) {
                workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
                if (!workspace) {
                        pr_err("No memory for compression workspace; skipping compression\n");
                        kfree(big_oops_buf);
                        big_oops_buf = NULL;
                }
        } else {
                pr_err("No memory for uncompressed data; skipping compression\n");
                workspace = NULL;
        }
}

static void free_lz4(void)
{
        kfree(workspace);
        kfree(big_oops_buf);
        big_oops_buf = NULL;
        big_oops_buf_sz = 0;
}

static const struct pstore_zbackend backend_lz4 = {
        .compress = compress_lz4,
        .decompress = decompress_lz4,
        .allocate = allocate_lz4,
        .free = free_lz4,
        .name = "lz4",
};
#endif

static const struct pstore_zbackend *zbackend =
#if defined(CONFIG_PSTORE_ZLIB_COMPRESS)
        &backend_zlib;
#elif defined(CONFIG_PSTORE_LZO_COMPRESS)
        &backend_lzo;
#elif defined(CONFIG_PSTORE_LZ4_COMPRESS)
        &backend_lz4;
#else
        NULL;
#endif

static int pstore_compress(const void *in, void *out,
                           size_t inlen, size_t outlen)
{
        if (zbackend)
                return zbackend->compress(in, out, inlen, outlen);
        else
                return -EIO;
}

static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
{
        if (zbackend)
                return zbackend->decompress(in, out, inlen, outlen);
        else
                return -EIO;
}

static void allocate_buf_for_compression(void)
{
        if (zbackend) {
                pr_info("using %s compression\n", zbackend->name);
                zbackend->allocate();
        } else {
                pr_err("allocate compression buffer error!\n");
        }
}

static void free_buf_for_compression(void)
{
        if (zbackend)
                zbackend->free();
        else
                pr_err("free compression buffer error!\n");
}
/*
 * Called when compression fails. The printk buffer has already been
 * fetched for compression; fetching it again would advance the kmsg
 * iterator and return older contents. Instead, copy the most recent
 * messages from big_oops_buf into psinfo->buf.
 */
static size_t copy_kmsg_to_buffer(int hsize, size_t len)
{
        size_t total_len;
        size_t diff;

        total_len = hsize + len;

        if (total_len > psinfo->bufsize) {
                diff = total_len - psinfo->bufsize + hsize;
                memcpy(psinfo->buf, big_oops_buf, hsize);
                memcpy(psinfo->buf + hsize, big_oops_buf + diff,
                       psinfo->bufsize - hsize);
                total_len = psinfo->bufsize;
        } else
                memcpy(psinfo->buf, big_oops_buf, total_len);

        return total_len;
}
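
/*
 * Worked example (illustrative numbers): with psinfo->bufsize = 4096,
 * hsize = 32 and len = 8000, total_len = 8032 exceeds the buffer, so
 * diff = 8032 - 4096 + 32 = 3968. The 32-byte header is copied first,
 * followed by the newest 4096 - 32 = 4064 bytes of the dump (starting at
 * big_oops_buf + 3968), filling the buffer exactly and dropping the
 * oldest text.
 */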
void pstore_record_init(struct pstore_record *record,
                        struct pstore_info *psinfo)
{
        memset(record, 0, sizeof(*record));
        record->psi = psinfo;

        /* Report zeroed timestamp if called before timekeeping has resumed. */
        if (__getnstimeofday(&record->time)) {
                record->time.tv_sec = 0;
                record->time.tv_nsec = 0;
        }
}

/*
 * callback from kmsg_dump. (s2,l2) has the most recently
 * written bytes, older bytes are in (s1,l1). Save as much
 * as we can from the end of the buffer.
 */
static void pstore_dump(struct kmsg_dumper *dumper,
                        enum kmsg_dump_reason reason)
{
        unsigned long total = 0;
        const char *why;
        unsigned int part = 1;
        unsigned long flags = 0;
        int is_locked;
        int ret;

        why = get_reason_str(reason);

        if (pstore_cannot_block_path(reason)) {
                is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
                if (!is_locked) {
                        pr_err("pstore dump routine blocked in %s path, may corrupt error record\n",
                               in_nmi() ? "NMI" : why);
                        return;
                }
        } else {
                spin_lock_irqsave(&psinfo->buf_lock, flags);
                is_locked = 1;
        }
        oopscount++;
        while (total < kmsg_bytes) {
                char *dst;
                size_t dst_size;
                int header_size;
                int zipped_len = -1;
                size_t dump_size;
                struct pstore_record record;

                pstore_record_init(&record, psinfo);
                record.type = PSTORE_TYPE_DMESG;
                record.count = oopscount;
                record.reason = reason;
                record.part = part;
                record.buf = psinfo->buf;

                if (big_oops_buf && is_locked) {
                        dst = big_oops_buf;
                        dst_size = big_oops_buf_sz;
                } else {
                        dst = psinfo->buf;
                        dst_size = psinfo->bufsize;
                }

                /* Write dump header. */
                header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
                                       oopscount, part);
                dst_size -= header_size;

                /* Write dump contents. */
                if (!kmsg_dump_get_buffer(dumper, true, dst + header_size,
                                          dst_size, &dump_size))
                        break;

                if (big_oops_buf && is_locked) {
                        zipped_len = pstore_compress(dst, psinfo->buf,
                                                     header_size + dump_size,
                                                     psinfo->bufsize);

                        if (zipped_len > 0) {
                                record.compressed = true;
                                record.size = zipped_len;
                        } else {
                                record.size = copy_kmsg_to_buffer(header_size,
                                                                  dump_size);
                        }
                } else {
                        record.size = header_size + dump_size;
                }

                ret = psinfo->write(&record);
                if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
                        pstore_new_entry = 1;

                total += record.size;
                part++;
        }
        if (is_locked)
                spin_unlock_irqrestore(&psinfo->buf_lock, flags);
}
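
/*
 * Illustrative note: the header written by pstore_dump() has the form
 * "<reason>#<oopscount> Part<part>\n", e.g. "Oops#1 Part1" for the first
 * chunk of the first oops since boot. When a dump does not fit into one
 * backend record, further records follow as Part2, Part3, ... until
 * kmsg_bytes of console log have been captured.
 */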
static struct kmsg_dumper pstore_dumper = {
        .dump = pstore_dump,
};

/*
 * Register with kmsg_dump to save last part of console log on panic.
 */
static void pstore_register_kmsg(void)
{
        kmsg_dump_register(&pstore_dumper);
}

static void pstore_unregister_kmsg(void)
{
        kmsg_dump_unregister(&pstore_dumper);
}

#ifdef CONFIG_PSTORE_CONSOLE
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
        const char *e = s + c;

        while (s < e) {
                struct pstore_record record;
                unsigned long flags;

                pstore_record_init(&record, psinfo);
                record.type = PSTORE_TYPE_CONSOLE;

                if (c > psinfo->bufsize)
                        c = psinfo->bufsize;

                if (oops_in_progress) {
                        if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
                                break;
                } else {
                        spin_lock_irqsave(&psinfo->buf_lock, flags);
                }
                record.buf = (char *)s;
                record.size = c;
                psinfo->write(&record);
                spin_unlock_irqrestore(&psinfo->buf_lock, flags);
                s += c;
                c = e - s;
        }
}

static struct console pstore_console = {
        .name = "pstore",
        .write = pstore_console_write,
        .flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
        .index = -1,
};

static void pstore_register_console(void)
{
        register_console(&pstore_console);
}

static void pstore_unregister_console(void)
{
        unregister_console(&pstore_console);
}
#else
static void pstore_register_console(void) {}
static void pstore_unregister_console(void) {}
#endif

static int pstore_write_user_compat(struct pstore_record *record,
                                    const char __user *buf)
{
        int ret = 0;

        if (record->buf)
                return -EINVAL;

        record->buf = memdup_user(buf, record->size);
        if (unlikely(IS_ERR(record->buf))) {
                ret = PTR_ERR(record->buf);
                goto out;
        }

        ret = record->psi->write(record);

        kfree(record->buf);
out:
        record->buf = NULL;

        return unlikely(ret < 0) ? ret : record->size;
}
/*
 * platform specific persistent storage driver registers with
 * us here. If pstore is already mounted, call the platform
 * read function right away to populate the file system. If not
 * then the pstore mount code will call us later to fill out
 * the file system.
 */
int pstore_register(struct pstore_info *psi)
{
        struct module *owner = psi->owner;

        if (backend && strcmp(backend, psi->name)) {
                pr_warn("ignoring unexpected backend '%s'\n", psi->name);
                return -EPERM;
        }

        /* Sanity check flags. */
        if (!psi->flags) {
                pr_warn("backend '%s' must support at least one frontend\n",
                        psi->name);
                return -EINVAL;
        }

        /* Check for required functions. */
        if (!psi->read || !psi->write) {
                pr_warn("backend '%s' must implement read() and write()\n",
                        psi->name);
                return -EINVAL;
        }

        spin_lock(&pstore_lock);
        if (psinfo) {
                pr_warn("backend '%s' already loaded: ignoring '%s'\n",
                        psinfo->name, psi->name);
                spin_unlock(&pstore_lock);
                return -EBUSY;
        }

        if (!psi->write_user)
                psi->write_user = pstore_write_user_compat;
        psinfo = psi;
        mutex_init(&psinfo->read_mutex);
        spin_unlock(&pstore_lock);

        if (owner && !try_module_get(owner)) {
                psinfo = NULL;
                return -EINVAL;
        }

        allocate_buf_for_compression();

        if (pstore_is_mounted())
                pstore_get_records(0);

        if (psi->flags & PSTORE_FLAGS_DMESG)
                pstore_register_kmsg();
        if (psi->flags & PSTORE_FLAGS_CONSOLE)
                pstore_register_console();
        if (psi->flags & PSTORE_FLAGS_FTRACE)
                pstore_register_ftrace();
        if (psi->flags & PSTORE_FLAGS_PMSG)
                pstore_register_pmsg();

        /* Start watching for new records, if desired. */
        if (pstore_update_ms >= 0) {
                pstore_timer.expires = jiffies +
                        msecs_to_jiffies(pstore_update_ms);
                add_timer(&pstore_timer);
        }

        /*
         * Update the module parameter backend, so it is visible
         * through /sys/module/pstore/parameters/backend
         */
        backend = psi->name;

        pr_info("Registered %s as persistent store backend\n", psi->name);

        module_put(owner);

        return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);
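
/*
 * Illustrative sketch (not part of this file) of how a backend registers,
 * assuming a hypothetical driver that provides example_read()/example_write()
 * and a static record buffer; real backends such as ramoops or efi-pstore
 * follow the same pattern (and also initialize psi->buf_lock and usually
 * provide .erase):
 *
 *        static char example_buf[4096];
 *
 *        static struct pstore_info example_pstore_info = {
 *                .owner   = THIS_MODULE,
 *                .name    = "example",
 *                .flags   = PSTORE_FLAGS_DMESG,
 *                .buf     = example_buf,
 *                .bufsize = sizeof(example_buf),
 *                .read    = example_read,
 *                .write   = example_write,
 *        };
 *
 *        err = pstore_register(&example_pstore_info);
 *
 * pstore_register() rejects backends that omit .read/.write or set no
 * frontend flags, and only one backend may be registered at a time.
 */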
void pstore_unregister(struct pstore_info *psi)
{
        /* Stop timer and make sure all work has finished. */
        pstore_update_ms = -1;
        del_timer_sync(&pstore_timer);
        flush_work(&pstore_work);

        if (psi->flags & PSTORE_FLAGS_PMSG)
                pstore_unregister_pmsg();
        if (psi->flags & PSTORE_FLAGS_FTRACE)
                pstore_unregister_ftrace();
        if (psi->flags & PSTORE_FLAGS_CONSOLE)
                pstore_unregister_console();
        if (psi->flags & PSTORE_FLAGS_DMESG)
                pstore_unregister_kmsg();

        free_buf_for_compression();

        psinfo = NULL;
        backend = NULL;
}
EXPORT_SYMBOL_GPL(pstore_unregister);
static void decompress_record(struct pstore_record *record)
{
        int unzipped_len;
        char *decompressed;

        if (!record->compressed)
                return;

        /* Only PSTORE_TYPE_DMESG records support compression. */
        if (record->type != PSTORE_TYPE_DMESG) {
                pr_warn("ignored compressed record type %d\n", record->type);
                return;
        }

        /* No compression method has created the common buffer. */
        if (!big_oops_buf) {
                pr_warn("no decompression buffer allocated\n");
                return;
        }

        unzipped_len = pstore_decompress(record->buf, big_oops_buf,
                                         record->size, big_oops_buf_sz);
        if (unzipped_len <= 0) {
                pr_err("decompression failed: %d\n", unzipped_len);
                return;
        }

        /* Build new buffer for decompressed contents. */
        decompressed = kmalloc(unzipped_len + record->ecc_notice_size,
                               GFP_KERNEL);
        if (!decompressed) {
                pr_err("decompression ran out of memory\n");
                return;
        }
        memcpy(decompressed, big_oops_buf, unzipped_len);

        /* Append ECC notice to decompressed buffer. */
        memcpy(decompressed + unzipped_len, record->buf + record->size,
               record->ecc_notice_size);

        /* Swap out compressed contents with decompressed contents. */
        kfree(record->buf);
        record->buf = decompressed;
        record->size = unzipped_len;
        record->compressed = false;
}
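
/*
 * Illustrative note on the buffer layout handled above: as returned by the
 * backend's read(), record->buf holds record->size bytes of compressed
 * dmesg text immediately followed by ecc_notice_size bytes of plain-text
 * ECC notice. After decompress_record() succeeds, record->buf holds the
 * decompressed text followed by the same ECC notice, and record->size is
 * the decompressed length.
 */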
/*
 * Read all the records from one persistent store backend. Create
 * files in our filesystem. Don't warn about -EEXIST errors
 * when we are re-scanning the backing store looking to add new
 * error records.
 */
void pstore_get_backend_records(struct pstore_info *psi,
                                struct dentry *root, int quiet)
{
        int failed = 0;
        unsigned int stop_loop = 65536;

        if (!psi || !root)
                return;

        mutex_lock(&psi->read_mutex);
        if (psi->open && psi->open(psi))
                goto out;

        /*
         * Backend callback read() allocates record.buf. decompress_record()
         * may reallocate record.buf. On success, pstore_mkfile() will keep
         * the record.buf, so free it only on failure.
         */
        for (; stop_loop; stop_loop--) {
                struct pstore_record *record;
                int rc;

                record = kzalloc(sizeof(*record), GFP_KERNEL);
                if (!record) {
                        pr_err("out of memory creating record\n");
                        break;
                }
                pstore_record_init(record, psi);

                record->size = psi->read(record);

                /* No more records left in backend? */
                if (record->size <= 0) {
                        kfree(record);
                        break;
                }

                decompress_record(record);
                rc = pstore_mkfile(root, record);
                if (rc) {
                        /* pstore_mkfile() did not take record, so free it. */
                        kfree(record->buf);
                        kfree(record);
                        if (rc != -EEXIST || !quiet)
                                failed++;
                }
        }
        if (psi->close)
                psi->close(psi);
out:
        mutex_unlock(&psi->read_mutex);

        if (failed)
                pr_warn("failed to create %d record(s) from '%s'\n",
                        failed, psi->name);
        if (!stop_loop)
                pr_err("looping? Too many records seen from '%s'\n",
                       psi->name);
}

static void pstore_dowork(struct work_struct *work)
{
        pstore_get_records(1);
}

static void pstore_timefunc(unsigned long dummy)
{
        if (pstore_new_entry) {
                pstore_new_entry = 0;
                schedule_work(&pstore_work);
        }

        if (pstore_update_ms >= 0)
                mod_timer(&pstore_timer,
                          jiffies + msecs_to_jiffies(pstore_update_ms));
}

module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");
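
/*
 * Usage note (illustrative): "backend" is read-only at runtime (0444) and is
 * normally set on the kernel command line, e.g.
 *
 *        pstore.backend=ramoops
 *
 * With the parameter set, pstore_register() rejects any other backend with
 * -EPERM; once a backend registers, its name is reported back through
 * /sys/module/pstore/parameters/backend.
 */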