fs: Convert buffer to XArray

Mostly comment fixes, but one use of __xa_set_mark.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Matthew Wilcox 2017-12-04 10:40:41 -05:00
parent 0a943c65e7
commit ec82e1c1c8
1 changed file with 7 additions and 7 deletions


@@ -562,7 +562,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
 
 /*
- * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * Mark the page dirty, and set it dirty in the page cache, and mark the inode
  * dirty.
  *
  * If warn is true, then emit a warning if the page is not uptodate and has
@@ -579,8 +579,8 @@ void __set_page_dirty(struct page *page, struct address_space *mapping,
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
 		account_page_dirtied(page, mapping);
-		radix_tree_tag_set(&mapping->i_pages,
-				page_index(page), PAGECACHE_TAG_DIRTY);
+		__xa_set_mark(&mapping->i_pages, page_index(page),
+				PAGECACHE_TAG_DIRTY);
 	}
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 }
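
Aside, not part of the commit: __xa_set_mark() is the lock-already-held variant of xa_set_mark(), which is why __set_page_dirty() above can call it between xa_lock_irqsave() and xa_unlock_irqrestore(). A minimal, self-contained sketch of the same pattern on a hypothetical standalone XArray; the name example_xa and the use of XA_MARK_0 are chosen purely for illustration:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(example_xa);	/* illustrative xarray, not in this commit */

	static void example_store_and_mark(unsigned long index, void *entry)
	{
		unsigned long flags;

		/* xa_store() takes and drops the xa_lock internally */
		xa_store(&example_xa, index, entry, GFP_KERNEL);

		/* __xa_set_mark() expects the caller to hold xa_lock */
		xa_lock_irqsave(&example_xa, flags);
		__xa_set_mark(&example_xa, index, XA_MARK_0);
		xa_unlock_irqrestore(&example_xa, flags);

		/* the mark can be tested without holding the lock */
		WARN_ON(!xa_get_mark(&example_xa, index, XA_MARK_0));
	}
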
@@ -1050,7 +1050,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * The relationship between dirty buffers and dirty pages:
  *
  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
- * the page is tagged dirty in its radix tree.
+ * the page is tagged dirty in the page cache.
  *
  * At all times, the dirtiness of the buffers represents the dirtiness of
  * subsections of the page.  If the page has buffers, the page dirty bit is
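
Aside, not part of the commit: the invariant described above can be read directly off the buffer ring attached to the page. A rough sketch, where page_has_dirty_buffers() is a hypothetical helper name and the page is assumed to be locked and to have buffers:

	#include <linux/buffer_head.h>

	static bool page_has_dirty_buffers(struct page *page)
	{
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* walk the circular list of buffers backing this page */
		do {
			if (buffer_dirty(bh))
				return true;	/* at least one subsection is dirty */
			bh = bh->b_this_page;
		} while (bh != head);

		return false;
	}
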
@@ -1073,9 +1073,9 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * mark_buffer_dirty - mark a buffer_head as needing writeout
  * @bh: the buffer_head to mark dirty
  *
- * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
- * backing page dirty, then tag the page as dirty in its address_space's radix
- * tree and then attach the address_space's inode to its superblock's dirty
+ * mark_buffer_dirty() will set the dirty bit against the buffer, then set
+ * its backing page dirty, then tag the page as dirty in the page cache
+ * and then attach the address_space's inode to its superblock's dirty
  * inode list.
  *
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
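
Aside, not part of the commit: a typical caller of mark_buffer_dirty(), to illustrate the chain the comment describes. The superblock, block number, and payload below are placeholders:

	struct buffer_head *bh = sb_bread(sb, block_nr);	/* hypothetical metadata block */

	if (bh) {
		lock_buffer(bh);
		memcpy(bh->b_data, data, bh->b_size);	/* modify the buffer contents */
		unlock_buffer(bh);
		mark_buffer_dirty(bh);	/* buffer -> page -> page-cache dirty mark -> dirty inode */
		brelse(bh);
	}
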