/* backing-dev.c */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. Readers traverse bdi_list under
 * RCU, so only writers need to take the lock.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
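
/*
 * Illustrative sketch (editorial, not part of the original file): per the
 * locking rule above, a reader walks bdi_list under rcu_read_lock() alone,
 * while list_add_tail_rcu()/list_del_rcu() callers hold bdi_lock. The
 * function name is hypothetical.
 */
static inline void bdi_list_reader_example(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		/* @bdi may be unlinked concurrently but is not freed here */
	}
	rcu_read_unlock();
}
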
/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback: %10lu kB\n"
		   "BdiReclaimable: %10lu kB\n"
		   "BdiDirtyThresh: %10lu kB\n"
		   "DirtyThresh: %10lu kB\n"
		   "BackgroundThresh: %10lu kB\n"
		   "BdiDirtied: %10lu kB\n"
		   "BdiWritten: %10lu kB\n"
		   "BdiWriteBandwidth: %10lu kBps\n"
		   "b_dirty: %10lu\n"
		   "b_io: %10lu\n"
		   "b_more_io: %10lu\n"
		   "b_dirty_time: %10lu\n"
		   "bdi_list: %10u\n"
		   "state: %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}
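
/*
 * Editorial note (assumes the conventional debugfs mount point): with
 * debugfs mounted at /sys/kernel/debug, the stats file registered below
 * is readable at /sys/kernel/debug/bdi/<device>/stats, and every "kB"
 * line above is converted from pages by the K() macro.
 */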
static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}
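
/*
 * Worked example (editorial): with 4 KiB pages, PAGE_SHIFT is 12, so the
 * store above computes pages = kb >> 2; writing "128" to read_ahead_kb
 * sets ra_pages to 32 pages (128 KiB). The show side applies the inverse
 * shift via K() below.
 */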
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
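
/*
 * For reference (editorial): BDI_SHOW(read_ahead_kb, ...) above expands
 * to a read_ahead_kb_show() helper and, through DEVICE_ATTR_RW(), pairs
 * it with the read_ahead_kb_store() defined earlier into the read-write
 * sysfs attribute dev_attr_read_ahead_kb.
 */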

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}
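
/*
 * Editorial note on the choice above: queue_delayed_work() is a no-op if
 * @dwork is already pending, so an earlier-scheduled flush is never pushed
 * back. Compare wb_shutdown() below, which deliberately uses
 * mod_delayed_work(..., 0) to pull any pending work forward to "now".
 */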

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
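
/*
 * Worked example (editorial): INIT_BW is in pages per second. With 4 KiB
 * pages (PAGE_SHIFT == 12), 100 << (20 - 12) == 100 * 256 == 25600
 * pages/s, i.e. 25600 * 4 KiB = 100 MiB/s.
 */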

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested)
		return -ENOMEM;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	/* unwind only the counters that were successfully initialised */
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
	return err;
}

/*
 * Shut down a wb: stop new work from being queued and drain whatever is
 * already pending.
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	/*
	 * Drain the work list and shut down the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list. bdi->cgwb_tree is also RCU
 * protected. cgwb_release_wait is used to wait for the completion of cgwb
 * releases from bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi. If missing, create one.
 * The returned wb_congested has its reference count incremented. Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = container_of(parent, struct bdi_writeback_congested,
					 rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);

	if (atomic_dec_and_test(&bdi->usage_cnt))
		wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		/* must drop the blkcg_css reference taken above */
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online. Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			atomic_inc(&bdi->usage_cnt);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
 * create one. The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation. IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough; acquire a real reference before calling this
 * function.
 *
 * A wb is keyed by its associated memcg. As blkcg implicitly enables
 * memcg on the default hierarchy, the memcg association is guaranteed to
 * be more specific (equal to or a descendant of the associated blkcg) and
 * thus can identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup. On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfp & __GFP_WAIT);

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}
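
/*
 * Illustrative usage (editorial sketch, not from this file; assumes the
 * wb_put() helper from backing-dev.h): a writeback path that already
 * holds a reference on @memcg_css might do
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_NOFS);
 *	if (wb) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 *
 * The lookup loop above exits with NULL when cgwb_create() fails, so
 * callers must tolerate a NULL return.
 */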

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	atomic_set(&bdi->usage_cnt, 1);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = mem_cgroup_root_css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	struct bdi_writeback_congested *congested, *congested_n;
	void **slot;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);

	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);

	rbtree_postorder_for_each_entry_safe(congested, congested_n,
			&bdi->cgwb_congested_tree, rb_node) {
		rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
		congested->bdi = NULL;	/* mark @congested unlinked */
	}

	spin_unlock_irq(&cgwb_lock);

	/*
	 * All cgwb's and their congested states must be shutdown and
	 * released before returning. Drain the usage counter to wait for
	 * all cgwb's and cgwb_congested's ever created on @bdi.
	 */
	atomic_dec(&bdi->usage_cnt);
	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		kfree(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	init_waitqueue_head(&bdi->wb_waitq);

	return cgwb_bdi_init(bdi);
}
EXPORT_SYMBOL(bdi_init);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_destroy(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_destroy(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	wb_exit(&bdi->wb);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
	int err;

	bdi->name = name;
	bdi->capabilities = 0;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
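
/*
 * Illustrative usage (editorial sketch; the fill_super shape, "sbi", and
 * "myfs" names are hypothetical): a filesystem without a backing block
 * device typically embeds a bdi in its per-sb info and wires it up like
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs");
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * and calls bdi_destroy(&sbi->bdi) when tearing the superblock down.
 */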

static wait_queue_head_t congestion_wqh[2] = {
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to
 * exit write congestion. If no backing_devs are congested then just wait
 * for the next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
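
/*
 * Illustrative usage (editorial sketch): reclaim-style callers typically
 * back off briefly under congestion, e.g.
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * sleeps for at most 100 ms, returning early once some backing_dev leaves
 * async write congestion or a write completes.
 */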

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) and the given
 * @zone having experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of zone congestion, this function yields the processor
 * via cond_resched() if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep lasted the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
		    table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}