mirror of https://gitee.com/openkylin/linux.git
[XFS] Use the right offset when ensuring a delayed allocate conversion has covered the offset originally requested. The bug can cause data corruption when multiple processes are performing writeout on different areas of the same file, though it is quite difficult to hit.
SGI Modid: xfs-linux:xfs-kern:22377a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Christoph Hellwig <hch@sgi.com>
commit 24e17b5fb9
parent 775bf6c99a
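The heart of the change is in xfs_iomap_write_allocate(): the routine now receives the byte offset the caller originally asked to have converted, derives offset_fsb from it, and checks that the extent returned by the allocation actually covers that block, rather than keying the check off the start of the delayed-allocation mapping. A minimal, self-contained sketch of that containment test follows; the types and helper names are illustrative stand-ins, not the kernel's own.

#include <stdio.h>

/* Illustrative stand-ins for xfs_fileoff_t / xfs_bmbt_irec_t. */
typedef unsigned long long fsblock_t;

struct extent {
	fsblock_t br_startoff;		/* first file block covered */
	fsblock_t br_blockcount;	/* number of blocks covered */
};

/*
 * Does the extent we just allocated cover the block the caller originally
 * requested?  Before the fix, the test keyed off the start of the delalloc
 * mapping; with concurrent writers converting other parts of the same file,
 * that can succeed for the wrong range.
 */
static int extent_covers(const struct extent *imap, fsblock_t offset_fsb)
{
	return offset_fsb >= imap->br_startoff &&
	       offset_fsb < imap->br_startoff + imap->br_blockcount;
}

int main(void)
{
	struct extent imap = { .br_startoff = 8, .br_blockcount = 4 };	/* blocks 8..11 */
	fsblock_t requested_fsb = 10;	/* block derived from the requested byte offset */

	printf("allocation covers requested block: %s\n",
	       extent_covers(&imap, requested_fsb) ? "yes" : "no");
	return 0;
}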
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -558,7 +558,8 @@ xfs_submit_page(
 	int			i;
 
 	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
+	if (bh_count)
+		set_page_writeback(page);
 	if (clear_dirty)
 		clear_page_dirty(page);
 	unlock_page(page);
@@ -578,9 +579,6 @@ xfs_submit_page(
 
 		if (probed_page && clear_dirty)
 			wbc->nr_to_write--;	/* Wrote an "extra" page */
-	} else {
-		end_page_writeback(page);
-		wbc->pages_skipped++;	/* We didn't write this page */
 	}
 }
 
@@ -602,21 +600,26 @@ xfs_convert_page(
 {
 	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
 	xfs_iomap_t		*mp = iomapp, *tmp;
-	unsigned long		end, offset;
-	pgoff_t			end_index;
-	int			i = 0, index = 0;
+	unsigned long		offset, end_offset;
+	int			index = 0;
 	int			bbits = inode->i_blkbits;
+	int			len, page_dirty;
 
-	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-	if (page->index < end_index) {
-		end = PAGE_CACHE_SIZE;
-	} else {
-		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
-	}
+	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
+
+	/*
+	 * page_dirty is initially a count of buffers on the page before
+	 * EOF and is decrememted as we move each into a cleanable state.
+	 */
+	len = 1 << inode->i_blkbits;
+	end_offset = max(end_offset, PAGE_CACHE_SIZE);
+	end_offset = roundup(end_offset, len);
+	page_dirty = end_offset / len;
+
+	offset = 0;
 	bh = head = page_buffers(page);
 	do {
-		offset = i << bbits;
-		if (offset >= end)
+		if (offset >= end_offset)
 			break;
 		if (!(PageUptodate(page) || buffer_uptodate(bh)))
 			continue;
@@ -625,6 +628,7 @@ xfs_convert_page(
 			if (startio) {
 				lock_buffer(bh);
 				bh_arr[index++] = bh;
+				page_dirty--;
 			}
 			continue;
 		}
@@ -657,10 +661,11 @@ xfs_convert_page(
 			unlock_buffer(bh);
 			mark_buffer_dirty(bh);
 		}
-	} while (i++, (bh = bh->b_this_page) != head);
+		page_dirty--;
+	} while (offset += len, (bh = bh->b_this_page) != head);
 
-	if (startio) {
-		xfs_submit_page(page, wbc, bh_arr, index, 1, index == i);
+	if (startio && index) {
+		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
 	} else {
 		unlock_page(page);
 	}
@@ -743,19 +748,22 @@ xfs_page_state_convert(
 		}
 	}
 
-	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	end_offset = min_t(unsigned long long,
-			offset + PAGE_CACHE_SIZE, i_size_read(inode));
-
-	bh = head = page_buffers(page);
-	iomp = NULL;
+			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
+	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 
 	/*
-	 * page_dirty is initially a count of buffers on the page and
-	 * is decrememted as we move each into a cleanable state.
+	 * page_dirty is initially a count of buffers on the page before
+	 * EOF and is decrememted as we move each into a cleanable state.
 	 */
-	len = bh->b_size;
-	page_dirty = PAGE_CACHE_SIZE / len;
+	len = 1 << inode->i_blkbits;
+	p_offset = max(p_offset, PAGE_CACHE_SIZE);
+	p_offset = roundup(p_offset, len);
+	page_dirty = p_offset / len;
+
+	iomp = NULL;
+	p_offset = 0;
+	bh = head = page_buffers(page);
 
 	do {
 		if (offset >= end_offset)
@@ -877,8 +885,10 @@ xfs_page_state_convert(
 	if (uptodate && bh == head)
 		SetPageUptodate(page);
 
-	if (startio)
-		xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1);
+	if (startio) {
+		WARN_ON(page_dirty);
+		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
+	}
 
 	if (iomp) {
 		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -308,7 +308,8 @@ xfs_iomap(
 			break;
 		}
 
-		error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps);
+		error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count,
+						 &imap, &nimaps);
 		break;
 	case BMAPI_UNWRITTEN:
 		lockmode = 0;
@@ -746,6 +747,8 @@ xfs_iomap_write_delay(
 int
 xfs_iomap_write_allocate(
 	xfs_inode_t	*ip,
+	loff_t		offset,
+	size_t		count,
 	xfs_bmbt_irec_t *map,
 	int		*retmap)
 {
@@ -770,9 +773,9 @@ xfs_iomap_write_allocate(
 	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
 		return XFS_ERROR(error);
 
-	offset_fsb = map->br_startoff;
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	count_fsb = map->br_blockcount;
-	map_start_fsb = offset_fsb;
+	map_start_fsb = map->br_startoff;
 
 	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
 
@@ -868,9 +871,9 @@ xfs_iomap_write_allocate(
 					imap[i].br_startoff,
 					imap[i].br_blockcount,imap[i].br_state);
 			}
-			if ((map->br_startoff >= imap[i].br_startoff) &&
-			    (map->br_startoff < (imap[i].br_startoff +
-						 imap[i].br_blockcount))) {
+			if ((offset_fsb >= imap[i].br_startoff) &&
+			    (offset_fsb < (imap[i].br_startoff +
+					   imap[i].br_blockcount))) {
 				*map = imap[i];
 				*retmap = 1;
 				XFS_STATS_INC(xs_xstrat_quick);
@@ -883,9 +886,8 @@ xfs_iomap_write_allocate(
 		 * file, just surrounding data, try again.
 		 */
 		nimaps--;
-		offset_fsb = imap[nimaps].br_startoff +
-			     imap[nimaps].br_blockcount;
-		map_start_fsb = offset_fsb;
+		map_start_fsb = imap[nimaps].br_startoff +
+				imap[nimaps].br_blockcount;
 	}
 
 trans_cancel:
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -29,9 +29,6 @@
  *
  * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
  */
-
-
-
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
@@ -56,7 +53,7 @@ typedef enum {
 	BMAPI_UNWRITTEN = (1 << 3),	/* unwritten extents to real extents */
 	/* modifiers */
 	BMAPI_IGNSTATE = (1 << 4),	/* ignore unwritten state on read */
-	BMAPI_DIRECT = (1 << 5),	/* direct instead of buffered write */
+	BMAPI_DIRECT = (1 << 5),	/* direct instead of buffered write */
 	BMAPI_MMAP = (1 << 6),		/* allocate for mmap write */
 	BMAPI_SYNC = (1 << 7),		/* sync write to flush delalloc space */
 	BMAPI_TRYLOCK = (1 << 8),	/* non-blocking request */
@@ -100,7 +97,7 @@ extern int xfs_iomap_write_direct(struct xfs_inode *, loff_t, size_t,
 				  int, struct xfs_bmbt_irec *, int *, int);
 extern int xfs_iomap_write_delay(struct xfs_inode *, loff_t, size_t, int,
 				 struct xfs_bmbt_irec *, int *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *,
+extern int xfs_iomap_write_allocate(struct xfs_inode *, loff_t, size_t,
 				struct xfs_bmbt_irec *, int *);
 extern int xfs_iomap_write_unwritten(struct xfs_inode *, loff_t, size_t);
 
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -216,7 +216,8 @@ typedef int (*xfs_iomap_write_delay_t)(
 				void *, loff_t, size_t, int,
 				struct xfs_bmbt_irec *, int *);
 typedef int	(*xfs_iomap_write_allocate_t)(
-				void *, struct xfs_bmbt_irec *, int *);
+				void *, loff_t, size_t,
+				struct xfs_bmbt_irec *, int *);
 typedef int	(*xfs_iomap_write_unwritten_t)(
 				void *, loff_t, size_t);
 typedef uint	(*xfs_lck_map_shared_t)(void *);
@@ -258,9 +259,9 @@ typedef struct xfs_ioops {
 #define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
 	(*(mp)->m_io_ops.xfs_iomap_write_delay) \
 		((io)->io_obj, offset, count, flags, mval, nmap)
-#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \
+#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count, mval, nmap) \
 	(*(mp)->m_io_ops.xfs_iomap_write_allocate) \
-		((io)->io_obj, mval, nmap)
+		((io)->io_obj, offset, count, mval, nmap)
 #define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
 	(*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
 		((io)->io_obj, offset, count)