
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
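
/*
 * String tables for sysfs attributes that take one of a fixed set of
 * values. The position of each string matches the numeric value kept in
 * the superblock (CACHE_REPLACEMENT() for the policies, c->on_error for
 * the error actions); bch_snprint_string_list() and
 * bch_read_string_list() translate between string and index. The lists
 * are NULL-terminated.
 */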
static const char * const cache_replacement_policies[] = {
        "lru",
        "fifo",
        "random",
        NULL
};

static const char * const error_actions[] = {
        "unregister",
        "panic",
        NULL
};
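
/*
 * Attribute declarations. These macros are defined in sysfs.h: each one
 * expands to a static struct attribute named sysfs_<name> with the
 * corresponding permissions (write-only, read-only, or read-write). The
 * show/store handlers below compare their attr argument against these
 * objects to decide which attribute is being accessed.
 */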
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
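
/*
 * Show handler for cached (backing) devices. SHOW() declares the body
 * of __bch_cached_dev_show(); SHOW_LOCKED() below generates the actual
 * bch_cached_dev_show() wrapper, which takes bch_register_lock around
 * it (both macros live in sysfs.h). Dirty data, rates and sizes are
 * kept internally in 512-byte sectors, hence the << 9 shifts to get
 * bytes before pretty-printing with the hprint helpers.
 */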
SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)       (dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_cache_modes + 1,
                                               BDEV_CACHE_MODE(&dc->sb));

        sysfs_printf(data_csum, "%i", dc->disk.data_csum);
        var_printf(verify, "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata, "%i");
        var_printf(writeback_running, "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);

        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_d_term);
        var_print(writeback_rate_p_term_inverse);

        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char derivative[20];
                char change[20];
                s64 next_io;

                bch_hprint(rate,        dc->writeback_rate.rate << 9);
                bch_hprint(dirty,       bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target,      dc->writeback_rate_target << 9);
                bch_hprint(proportional, dc->writeback_rate_proportional << 9);
                bch_hprint(derivative,  dc->writeback_rate_derivative << 9);
                bch_hprint(change,      dc->writeback_rate_change << 9);

                next_io = div64_s64(dc->writeback_rate.next - local_clock(),
                                    NSEC_PER_MSEC);

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "derivative:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               derivative, change, next_io);
        }

        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
        var_printf(partial_stripes_expensive, "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running, atomic_read(&dc->running));
        sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

#undef var
        return 0;
}
SHOW_LOCKED(bch_cached_dev)
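
/*
 * Store handler for cached devices. Plain flags and integers are
 * handled by the d_strtoul()/sysfs_strtoul() macros, which parse buf
 * and return early when attr matches; the explicit if-blocks below
 * handle attributes with side effects (superblock writes, uevents,
 * attaching to a cache set).
 */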
STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        unsigned v = size;
        struct cache_set *c;
        struct kobj_uevent_env *env;

#define d_strtoul(var)          sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)  sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)         sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum, dc->disk.data_csum);
        d_strtoul(verify);
        d_strtoul(bypass_torture_test);
        d_strtoul(writeback_metadata);
        d_strtoul(writeback_running);
        d_strtoul(writeback_delay);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

        sysfs_strtoul_clamp(writeback_rate,
                            dc->writeback_rate.rate, 1, INT_MAX);

        d_strtoul_nonzero(writeback_rate_update_seconds);
        d_strtoul(writeback_rate_d_term);
        d_strtoul_nonzero(writeback_rate_p_term_inverse);

        d_strtoi_h(sequential_cutoff);
        d_strtoi_h(readahead);

        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf))
                bch_cached_dev_run(dc);

        if (attr == &sysfs_cache_mode) {
                ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

                if (v < 0)
                        return v;

                if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }

        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }
                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
                                   KOBJ_CHANGE, env->envp);
                kfree(env);
        }

        if (attr == &sysfs_attach) {
                if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
                        return -EINVAL;

                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c);
                        if (!v)
                                return size;
                }

                pr_err("Can't attach %s: cache set not found", buf);
                size = v;
        }

        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);

        return size;
}
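
/*
 * Wrapper that takes bch_register_lock around the real store handler,
 * then pokes the writeback machinery so changes to writeback_running
 * and writeback_percent take effect immediately rather than waiting
 * for the next rate-update interval.
 */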
STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);

        if (attr == &sysfs_writeback_running)
                bch_writeback_queue(dc);

        if (attr == &sysfs_writeback_percent)
                schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);
        return size;
}
static struct attribute *bch_cached_dev_files[] = {
        &sysfs_attach,
        &sysfs_detach,
        &sysfs_stop,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_cache_mode,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_d_term,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_debug,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
        &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
#endif
        NULL
};
KTYPE(bch_cached_dev);
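
/*
 * Flash-only volumes: bcache devices carved directly out of the cache
 * set, with no backing device. Their size and label live in the cache
 * set's uuid entries rather than in a backing device superblock, which
 * is why the handlers below read and write d->c->uuids[d->id].
 */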
SHOW(bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size, u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        return 0;
}
STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_strtoul(data_csum, d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;
                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }

        return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
        &sysfs_unregister,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_label,
        &sysfs_size,
        NULL
};
KTYPE(bch_flash_dev);
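
/*
 * bset statistics are gathered by walking every node in the btree:
 * bch_btree_map_nodes() invokes bch_btree_bset_stats() on each node,
 * accumulating per-node key statistics into a bset_stats_op, and
 * bch_bset_print_stats() formats the totals for sysfs.
 */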
struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);

        return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes: %zu\n"
                        "written sets: %zu\n"
                        "unwritten sets: %zu\n"
                        "written key bytes: %zu\n"
                        "unwritten key bytes: %zu\n"
                        "floats: %zu\n"
                        "failed: %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}
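
/*
 * Helpers for the cache set statistics below. bch_root_usage() has to
 * take its read lock in a retry loop: the root node can be replaced
 * (e.g. by a split) between reading c->root and acquiring the lock, so
 * it loops until the pointer is stable under the lock.
 */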
static unsigned bch_root_usage(struct cache_set *c)
{
        unsigned bytes = 0;
        struct bkey *k;
        struct btree *b;
        struct btree_iter iter;

        goto lock_root;

        do {
                rw_unlock(false, b);
lock_root:
                b = c->root;
                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        rw_unlock(false, b);

        return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned bch_cache_max_chain(struct cache_set *c)
{
        unsigned ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}
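
/*
 * Show handler for the cache set as a whole. Most numbers come from the
 * last garbage collection pass (gc_stats) and the atomic read-race,
 * writeback and error counters; byte-sized values are converted from
 * sectors (<< 9) and pretty-printed via sysfs_hprint().
 */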
SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous, CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms, c->journal_delay_ms);
        sysfs_hprint(bucket_size, bucket_bytes(c));
        sysfs_hprint(block_size, block_bytes(c));
        sysfs_print(tree_depth, c->root->level);
        sysfs_print(root_usage_percent, bch_root_usage(c));

        sysfs_hprint(btree_cache_size, bch_cache_size(c));
        sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
        sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

        sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes, c->gc_stats.nodes);
        sysfs_hprint(average_key_size, bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));

        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors for why 88 */
        sysfs_print(io_error_halflife, c->error_decay * 88);
        sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);

        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
        sysfs_printf(verify, "%i", c->verify);
        sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);

        return 0;
}
SHOW_LOCKED(bch_cache_set)
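
/*
 * Store handler for the cache set. Attributes that change on-disk
 * state (synchronous) rewrite the superblock; trigger_gc wakes the
 * garbage collector, and prune_cache invokes the btree cache shrinker
 * directly with the requested scan count.
 */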
STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);

        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }

        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;
                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
                if (r)
                        return r;
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done, 0);
                atomic_long_set(&c->writeback_keys_failed, 0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }

        if (attr == &sysfs_trigger_gc)
                wake_up_gc(c);

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;
                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }

        sysfs_strtoul(congested_read_threshold_us,
                      c->congested_read_threshold_us);
        sysfs_strtoul(congested_write_threshold_us,
                      c->congested_write_threshold_us);

        if (attr == &sysfs_errors) {
                ssize_t v = bch_read_string_list(buf, error_actions);

                if (v < 0)
                        return v;

                c->on_error = v;
        }

        if (attr == &sysfs_io_error_limit)
                c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife)
                c->error_decay = strtoul_or_return(buf) / 88;

        sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
        sysfs_strtoul(verify, c->verify);
        sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
        sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
        sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
        sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);

        return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);
        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);
        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}
static struct attribute *bch_cache_set_files[] = {
        &sysfs_unregister,
        &sysfs_stop,
        &sysfs_synchronous,
        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_tree_depth,
        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,
        &sysfs_congested,
        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,
        &sysfs_clear_stats,
        NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_nodes,
        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

        &sysfs_trigger_gc,
        &sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,
        NULL
};
KTYPE(bch_cache_set_internal);
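
/*
 * Show handler for an individual cache device. priority_stats walks
 * every bucket to classify it by GC mark, then sorts a vmalloc'd copy
 * of the bucket priorities to report the average and 31 quantiles of
 * cached-data priority, a rough picture of the cache's working set age.
 */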
SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size, bucket_bytes(ca));
        sysfs_hprint(block_size, block_bytes(ca));
        sysfs_print(nbuckets, ca->sb.nbuckets);
        sysfs_print(discard, ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
                                               CACHE_REPLACEMENT(&ca->sb));

        if (attr == &sysfs_priority_stats) {
                int cmp(const void *l, const void *r)
                {       return *((uint16_t *) r) - *((uint16_t *) l); }

                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), cmp, NULL);

                while (n &&
                       !cached[n - 1])
                        --n;

                unused = ca->sb.nbuckets - n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        do_div(sum, n);

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused: %zu%%\n"
                                "Clean: %zu%%\n"
                                "Dirty: %zu%%\n"
                                "Metadata: %zu%%\n"
                                "Average: %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles: [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);
                ret--;
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }

        return 0;
}
SHOW_LOCKED(bch_cache)
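
/*
 * Store handler for a cache device. The runtime discard flag is only
 * applied when the underlying queue advertises discard support, though
 * the superblock bit is updated either way. Changing the replacement
 * policy takes bucket_lock, presumably because the allocator consults
 * CACHE_REPLACEMENT() while invalidating buckets.
 */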
STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_cache_replacement_policy) {
                ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

                if (v < 0)
                        return v;

                if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)
static struct attribute *bch_cache_files[] = {
        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_nbuckets,
        &sysfs_priority_stats,
        &sysfs_discard,
        &sysfs_written,
        &sysfs_btree_written,
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
        &sysfs_cache_replacement_policy,
        NULL
};
KTYPE(bch_cache);