Merge branch 'for-4.2/writeback' of git://git.kernel.dk/linux-block
Pull cgroup writeback support from Jens Axboe:
 "This is the big pull request for adding cgroup writeback support.

  This code has been in development for a long time, and it has been
  simmering in for-next for a good chunk of this cycle too.  This is one
  of those problems that has been talked about for at least half a
  decade; finally there's a solution and code to go with it.

  Also see last week's writeup on LWN:

        http://lwn.net/Articles/648292/"

* 'for-4.2/writeback' of git://git.kernel.dk/linux-block: (85 commits)
  writeback, blkio: add documentation for cgroup writeback support
  vfs, writeback: replace FS_CGROUP_WRITEBACK with SB_I_CGROUPWB
  writeback: do foreign inode detection iff cgroup writeback is enabled
  v9fs: fix error handling in v9fs_session_init()
  bdi: fix wrong error return value in cgwb_create()
  buffer: remove unusued 'ret' variable
  writeback: disassociate inodes from dying bdi_writebacks
  writeback: implement foreign cgroup inode bdi_writeback switching
  writeback: add lockdep annotation to inode_to_wb()
  writeback: use unlocked_inode_to_wb transaction in inode_congested()
  writeback: implement unlocked_inode_to_wb transaction and use it for stat updates
  writeback: implement [locked_]inode_to_wb_and_lock_list()
  writeback: implement foreign cgroup inode detection
  writeback: make writeback_control track the inode being written back
  writeback: relocate wb[_try]_get(), wb_put(), inode_{attach|detach}_wb()
  mm: vmscan: disable memcg direct reclaim stalling if cgroup writeback support is in use
  writeback: implement memcg writeback domain based throttling
  writeback: reset wb_domain->dirty_limit[_tstmp] when memcg domain size changes
  writeback: implement memcg wb_domain
  writeback: update wb_over_bg_thresh() to use wb_domain aware operations
  ...
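To make the central idea concrete, here is a toy, user-space model of the per-memcg writeback domain described by the commits above (e.g. "writeback: implement memcg wb_domain" and the wb_domain-aware wb_over_bg_thresh() update). It is only a sketch under those assumptions: every name in it is made up for illustration and none of it is the kernel API.

/*
 * Toy, user-space model of the per-memcg "writeback domain" idea -- NOT
 * kernel code; all names below are hypothetical.  The point it illustrates:
 * instead of one global dirty-page threshold that any cgroup can consume,
 * each memcg gets its own domain nested under the global one, and a writer
 * is considered over the background threshold when either its own domain or
 * the global domain is.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_domain {
        const char    *name;
        unsigned long dirty_pages;   /* pages currently dirty in this domain */
        unsigned long bg_thresh;     /* background writeback threshold */
};

/* Dirtying pages charges both the memcg domain and the global domain. */
static void toy_account_dirtied(struct toy_domain *global,
                                struct toy_domain *memcg, unsigned long pages)
{
        global->dirty_pages += pages;
        memcg->dirty_pages += pages;
}

/* Rough shape of the "over background threshold" check: look at both domains. */
static bool toy_over_bg_thresh(const struct toy_domain *global,
                               const struct toy_domain *memcg)
{
        return global->dirty_pages > global->bg_thresh ||
               memcg->dirty_pages > memcg->bg_thresh;
}

int main(void)
{
        struct toy_domain global  = { "global",  0, 1000 };
        struct toy_domain slow_cg = { "slow-cg", 0,  100 };

        toy_account_dirtied(&global, &slow_cg, 150);

        /*
         * The global domain is nowhere near its limit, but the cgroup's own
         * domain is over threshold, so background writeback kicks in for it
         * instead of letting it dirty memory on everyone else's behalf.
         */
        printf("%s over bg thresh: %s\n", slow_cg.name,
               toy_over_bg_thresh(&global, &slow_cg) ? "yes" : "no");
        return 0;
}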
@@ -1873,6 +1873,7 @@ xfs_vm_set_page_dirty(
 	loff_t			end_offset;
 	loff_t			offset;
 	int			newly_dirty;
+	struct mem_cgroup	*memcg;
 
 	if (unlikely(!mapping))
 		return !TestSetPageDirty(page);
@@ -1892,6 +1893,11 @@ xfs_vm_set_page_dirty(
 			offset += 1 << inode->i_blkbits;
 		} while (bh != head);
 	}
+	/*
+	 * Use mem_group_begin_page_stat() to keep PageDirty synchronized with
+	 * per-memcg dirty page counters.
+	 */
+	memcg = mem_cgroup_begin_page_stat(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
@@ -1902,13 +1908,15 @@ xfs_vm_set_page_dirty(
 		spin_lock_irqsave(&mapping->tree_lock, flags);
 		if (page->mapping) {	/* Race with truncate? */
 			WARN_ON_ONCE(!PageUptodate(page));
-			account_page_dirtied(page, mapping);
+			account_page_dirtied(page, mapping, memcg);
 			radix_tree_tag_set(&mapping->page_tree,
 					page_index(page), PAGECACHE_TAG_DIRTY);
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
+	mem_cgroup_end_page_stat(memcg);
+	if (newly_dirty)
+		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return newly_dirty;
 }
 
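The hunks above rework xfs_vm_set_page_dirty() so the dirty-bit update is bracketed by mem_cgroup_begin_page_stat()/mem_cgroup_end_page_stat(), the memcg is passed to account_page_dirtied(), and __mark_inode_dirty() moves after the page-stat section and runs only when the page was newly dirtied. The following user-space sketch shows just that ordering; every helper in it is a stand-in for illustration, not the kernel API, and the real locking is omitted.

/*
 * Ordering demonstrated (mirrors the diff above):
 *   1. enter the memcg page-stat section,
 *   2. test-and-set the dirty bit,
 *   3. if newly set, account the page against the memcg dirty counter,
 *   4. leave the page-stat section,
 *   5. only then mark the inode dirty, and only if the bit was newly set.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_memcg { unsigned long nr_dirty; };
struct toy_page  { bool dirty; struct toy_memcg *memcg; };

static struct toy_memcg *begin_page_stat(struct toy_page *page)
{
        /* Stand-in: the real helper pins the page's memcg and keeps
         * PageDirty and the per-memcg dirty counter in step. */
        return page->memcg;
}

static void end_page_stat(struct toy_memcg *memcg) { (void)memcg; }

static bool test_set_dirty(struct toy_page *page)
{
        bool was_dirty = page->dirty;   /* return the previous value */
        page->dirty = true;
        return was_dirty;
}

static void account_dirtied(struct toy_page *page, struct toy_memcg *memcg)
{
        (void)page;
        memcg->nr_dirty++;              /* per-memcg dirty counter */
}

static int toy_set_page_dirty(struct toy_page *page)
{
        struct toy_memcg *memcg = begin_page_stat(page);
        int newly_dirty = !test_set_dirty(page);

        if (newly_dirty)
                account_dirtied(page, memcg);
        end_page_stat(memcg);

        if (newly_dirty)
                printf("inode marked dirty (nr_dirty=%lu)\n", memcg->nr_dirty);
        return newly_dirty;
}

int main(void)
{
        struct toy_memcg cg = { 0 };
        struct toy_page page = { false, &cg };

        toy_set_page_dirty(&page);      /* newly dirty: accounted once */
        toy_set_page_dirty(&page);      /* already dirty: no double count */
        return 0;
}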
@@ -41,6 +41,7 @@
 #include <linux/dcache.h>
 #include <linux/falloc.h>
 #include <linux/pagevec.h>
+#include <linux/backing-dev.h>
 
 static const struct vm_operations_struct xfs_file_vm_ops;
 