nfs: page group support in nfs_mark_uptodate

Change how nfs_mark_uptodate checks whether writes cover a whole page.

This patch should have no effect yet, since all page groups currently
contain a single request, but it will come into play once pg_test
functions are modified to split pages into sub-page regions.

Signed-off-by: Weston Andros Adamson <dros@primarydata.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
commit d72ddcbab6 (parent 20633f042f)
Author: Weston Andros Adamson, 2014-05-15 11:56:48 -04:00
Committer: Trond Myklebust
1 file changed, 67 insertions(+), 7 deletions(-)
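
For readers new to page groups: requests that target the same page are linked
into a circular "page group" list through wb_this_page, and every member's
wb_head points at the group's head request. A minimal userspace model of that
layout (the field names mirror struct nfs_page; everything else is
illustrative, not the kernel's definition) looks like this:

	/* Illustrative model of one request in a page group. */
	struct pg_req {
		struct pg_req *wb_this_page;	/* next request on the same page (circular) */
		struct pg_req *wb_head;		/* head request of the group */
		unsigned int wb_pgbase;		/* start offset of this request within the page */
		unsigned int wb_bytes;		/* number of bytes this request covers */
	};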

@@ -154,18 +154,78 @@ static void nfs_set_pageerror(struct page *page)
 	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
 }
 
+/*
+ * nfs_page_group_search_locked
+ * @head - head request of page group
+ * @page_offset - offset into page
+ *
+ * Search page group with head @head to find a request that contains the
+ * page offset @page_offset.
+ *
+ * Returns a pointer to the first matching nfs request, or NULL if no
+ * match is found.
+ *
+ * Must be called with the page group lock held
+ */
+static struct nfs_page *
+nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
+{
+	struct nfs_page *req;
+
+	WARN_ON_ONCE(head != head->wb_head);
+	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags));
+
+	req = head;
+	do {
+		if (page_offset >= req->wb_pgbase &&
+		    page_offset < (req->wb_pgbase + req->wb_bytes))
+			return req;
+
+		req = req->wb_this_page;
+	} while (req != head);
+	return NULL;
+}
+
+/*
+ * nfs_page_group_covers_page
+ * @head - head request of page group
+ *
+ * Return true if the page group with head @head covers the whole page,
+ * returns false otherwise
+ */
+static bool nfs_page_group_covers_page(struct nfs_page *req)
+{
+	struct nfs_page *tmp;
+	unsigned int pos = 0;
+	unsigned int len = nfs_page_length(req->wb_page);
+
+	nfs_page_group_lock(req);
+
+	do {
+		tmp = nfs_page_group_search_locked(req->wb_head, pos);
+		if (tmp) {
+			/* no way this should happen */
+			WARN_ON_ONCE(tmp->wb_pgbase != pos);
+			pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
+		}
+	} while (tmp && pos < len);
+
+	nfs_page_group_unlock(req);
+	WARN_ON_ONCE(pos > len);
+
+	return pos == len;
+}
+
 /* We can set the PG_uptodate flag if we see that a write request
  * covers the full page.
  */
-static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
+static void nfs_mark_uptodate(struct nfs_page *req)
 {
-	if (PageUptodate(page))
+	if (PageUptodate(req->wb_page))
 		return;
-	if (base != 0)
+	if (!nfs_page_group_covers_page(req))
 		return;
-	if (count != nfs_page_length(page))
-		return;
-	SetPageUptodate(page);
+	SetPageUptodate(req->wb_page);
 }
 
 static int wb_priority(struct writeback_control *wbc)
@@ -796,7 +856,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
 		return PTR_ERR(req);
 	/* Update file length */
 	nfs_grow_file(page, offset, count);
-	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+	nfs_mark_uptodate(req);
 	nfs_mark_request_dirty(req);
 	nfs_unlock_and_release_request(req);
 	return 0;
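
As a worked illustration of the new logic, the sketch below re-implements the
two functions in plain userspace C (names like pg_req and group_covers_page
are hypothetical; the group lock and WARN_ON_ONCE are replaced by an assert)
and runs them over a two-request group on a 4096-byte page:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for struct nfs_page (see the model above). */
	struct pg_req {
		struct pg_req *wb_this_page;
		struct pg_req *wb_head;
		unsigned int wb_pgbase;
		unsigned int wb_bytes;
	};

	/* Mirrors nfs_page_group_search_locked, minus the lock assertions:
	 * walk the circular list for a request containing offset @off. */
	static struct pg_req *group_search(struct pg_req *head, unsigned int off)
	{
		struct pg_req *req = head;

		do {
			if (off >= req->wb_pgbase &&
			    off < (req->wb_pgbase + req->wb_bytes))
				return req;
			req = req->wb_this_page;
		} while (req != head);
		return NULL;
	}

	/* Mirrors the accumulation loop in nfs_page_group_covers_page. */
	static bool group_covers_page(struct pg_req *head, unsigned int len)
	{
		struct pg_req *tmp;
		unsigned int pos = 0;

		do {
			tmp = group_search(head, pos);
			if (tmp)
				pos += tmp->wb_bytes - (pos - tmp->wb_pgbase);
		} while (tmp && pos < len);
		assert(pos <= len);
		return pos == len;
	}

	int main(void)
	{
		/* Two sub-page requests that together cover a 4096-byte page. */
		struct pg_req a = { .wb_pgbase = 0,    .wb_bytes = 1024 };
		struct pg_req b = { .wb_pgbase = 1024, .wb_bytes = 3072 };

		a.wb_head = b.wb_head = &a;
		a.wb_this_page = &b;
		b.wb_this_page = &a;
		printf("covers: %d\n", group_covers_page(&a, 4096)); /* prints 1 */

		/* Shrink the second request, leaving [3072, 4096) unwritten. */
		b.wb_bytes = 2048;
		printf("covers: %d\n", group_covers_page(&a, 4096)); /* prints 0 */
		return 0;
	}

Note the pos - tmp->wb_pgbase term: when a matching request begins before pos
(overlapping an earlier request), only the bytes at or beyond pos are counted,
so overlapping requests cannot inflate the total past the page's true extent.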