@@ -1541,16 +1541,6 @@ pause:
 		bdi_start_background_writeback(bdi);
 }
 
-void set_page_dirty_balance(struct page *page)
-{
-	if (set_page_dirty(page)) {
-		struct address_space *mapping = page_mapping(page);
-
-		if (mapping)
-			balance_dirty_pages_ratelimited(mapping);
-	}
-}
-
 static DEFINE_PER_CPU(int, bdp_ratelimits);
 
 /*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
  * page dirty in that case, but not all the buffers. This is a "bottom-up"
  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
  *
- * Most callers have locked the page, which pins the address_space in memory.
- * But zap_pte_range() does not lock the page, however in that case the
- * mapping is pinned by the vma's ->vm_file reference.
- *
- * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() inside tree_lock.
+ * The caller must ensure this doesn't race with truncation. Most will simply
+ * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
+ * the pte lock held, which also locks out truncation.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
 	if (!TestSetPageDirty(page)) {
 		struct address_space *mapping = page_mapping(page);
-		struct address_space *mapping2;
 		unsigned long flags;
 
 		if (!mapping)
 			return 1;
 
 		spin_lock_irqsave(&mapping->tree_lock, flags);
-		mapping2 = page_mapping(page);
-		if (mapping2) { /* Race with truncate? */
-			BUG_ON(mapping2 != mapping);
-			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-			account_page_dirtied(page, mapping);
-			radix_tree_tag_set(&mapping->page_tree,
-				page_index(page), PAGECACHE_TAG_DIRTY);
-		}
+		BUG_ON(page_mapping(page) != mapping);
+		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+		account_page_dirtied(page, mapping);
+		radix_tree_tag_set(&mapping->page_tree, page_index(page),
+				   PAGECACHE_TAG_DIRTY);
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 		if (mapping->host) {
 			/* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
 		/*
 		 * We carefully synchronise fault handlers against
 		 * installing a dirty pte and marking the page dirty
-		 * at this point. We do this by having them hold the
-		 * page lock at some point after installing their
-		 * pte, but before marking the page dirty.
-		 * Pages are always locked coming in here, so we get
-		 * the desired exclusion. See mm/memory.c:do_wp_page()
-		 * for more comments.
+		 * at this point. We do this by having them hold the
+		 * page lock while dirtying the page, and pages are
+		 * always locked coming in here, so we get the desired
+		 * exclusion.
 		 */
 		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);