hugetlbfs: zero partial pages during fallocate hole punch
hugetlbfs fallocate support was originally added with commit 70c3547e36
("hugetlbfs: add hugetlbfs_fallocate()"). The initial support only operated
on whole hugetlb pages. This makes sense for populating files, as other
interfaces such as mmap and truncate require hugetlb page size alignment.
Only operating on whole hugetlb pages for the hole punch case was a
simplification, and there was no compelling use case to zero partial pages.
In a recent discussion[1], it was assumed that hugetlbfs hole punch would
zero partial hugetlb pages, as that is in line with the man page
description saying 'partial filesystem blocks are zeroed'. However, the
hugetlbfs hole punch code actually does this:
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);
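For example, with a 2MB huge page size, an unaligned request of
offset = 1MB and len = 4MB (values chosen here purely for illustration)
rounds to:

	hole_start = round_up(1MB, 2MB)          = 2MB
	hole_end   = round_down(1MB + 4MB, 2MB)  = 4MB

so only the single full page [2MB, 4MB) was removed, and the partial
ranges [1MB, 2MB) and [4MB, 5MB) were left untouched rather than zeroed.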
Modify the code to zero partial hugetlb pages in the hole punch range. It
is possible that application code could note a change in behavior.
However, that would imply the code is passing in an unaligned range and
expecting only whole pages to be removed. This is unlikely, as the
fallocate documentation states the opposite.
The current hugetlbfs fallocate hole punch behavior is tested with the
libhugetlbfs test fallocate_align[2]. This test will be updated to
validate partial page zeroing.
[1] https://lore.kernel.org/linux-mm/20571829-9d3d-0b48-817c-b6b15565f651@redhat.com/
[2] https://github.com/libhugetlbfs/libhugetlbfs/blob/master/tests/fallocate_align.c
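To make the new semantics concrete, a minimal userspace check in the
spirit of fallocate_align could look like the sketch below. This is an
illustration only, not code from the patch or from libhugetlbfs; it
assumes a hugetlbfs mount at /mnt/huge with 2MB huge pages and at least
three free huge pages.

	/* Sketch only: punch an unaligned hole, observe partial-page zeroing. */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define MB (1024UL * 1024UL)

	int main(void)
	{
		int fd = open("/mnt/huge/punch-test", O_CREAT | O_RDWR, 0600);
		unsigned char *p;

		if (fd < 0)
			return 1;
		if (ftruncate(fd, 6 * MB))
			return 1;
		p = mmap(NULL, 6 * MB, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;
		memset(p, 0xff, 6 * MB);	/* populate all three 2MB pages */

		/* Unaligned hole [1MB, 5MB): the only full page is [2MB, 4MB). */
		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      1 * MB, 4 * MB))
			return 1;

		/* With this patch both partial ranges read back as zero;
		 * before it, they still held the old 0xff data. */
		printf("p[1MB]=%#x p[4MB]=%#x\n", p[1 * MB], p[4 * MB]);
		return 0;
	}

With 2MB pages this removes the single full page [2MB, 4MB) and zeroes
the partial ranges [1MB, 2MB) and [4MB, 5MB).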
Link: https://lkml.kernel.org/r/YqeiMlZDKI1Kabfe@monkey
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent df4ae285a3
commit 68d32527d3
@@ -600,41 +600,79 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 	remove_inode_hugepages(inode, offset, LLONG_MAX);
 }
 
+static void hugetlbfs_zero_partial_page(struct hstate *h,
+					struct address_space *mapping,
+					loff_t start,
+					loff_t end)
+{
+	pgoff_t idx = start >> huge_page_shift(h);
+	struct folio *folio;
+
+	folio = filemap_lock_folio(mapping, idx);
+	if (!folio)
+		return;
+
+	start = start & ~huge_page_mask(h);
+	end = end & ~huge_page_mask(h);
+	if (!end)
+		end = huge_page_size(h);
+
+	folio_zero_segment(folio, (size_t)start, (size_t)end);
+
+	folio_unlock(folio);
+	folio_put(folio);
+}
+
 static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
+	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+	struct address_space *mapping = inode->i_mapping;
 	struct hstate *h = hstate_inode(inode);
 	loff_t hpage_size = huge_page_size(h);
 	loff_t hole_start, hole_end;
 
 	/*
-	 * For hole punch round up the beginning offset of the hole and
-	 * round down the end.
+	 * hole_start and hole_end indicate the full pages within the hole.
 	 */
 	hole_start = round_up(offset, hpage_size);
 	hole_end = round_down(offset + len, hpage_size);
 
+	inode_lock(inode);
+
+	/* protected by i_rwsem */
+	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
+		inode_unlock(inode);
+		return -EPERM;
+	}
+
+	i_mmap_lock_write(mapping);
+
+	/* If range starts before first full page, zero partial page. */
+	if (offset < hole_start)
+		hugetlbfs_zero_partial_page(h, mapping,
+				offset, min(offset + len, hole_start));
+
+	/* Unmap users of full pages in the hole. */
 	if (hole_end > hole_start) {
-		struct address_space *mapping = inode->i_mapping;
-		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
-
-		inode_lock(inode);
-
-		/* protected by i_rwsem */
-		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
-			inode_unlock(inode);
-			return -EPERM;
-		}
-
-		i_mmap_lock_write(mapping);
 		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
 			hugetlb_vmdelete_list(&mapping->i_mmap,
 						hole_start >> PAGE_SHIFT,
 						hole_end >> PAGE_SHIFT, 0);
-		i_mmap_unlock_write(mapping);
-		remove_inode_hugepages(inode, hole_start, hole_end);
-		inode_unlock(inode);
 	}
 
+	/* If range extends beyond last full page, zero partial page. */
+	if ((offset + len) > hole_end && (offset + len) > hole_start)
+		hugetlbfs_zero_partial_page(h, mapping,
+				hole_end, offset + len);
+
+	i_mmap_unlock_write(mapping);
+
+	/* Remove full pages from the file. */
+	if (hole_end > hole_start)
+		remove_inode_hugepages(inode, hole_start, hole_end);
+
+	inode_unlock(inode);
+
 	return 0;
 }
 
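The offset arithmetic in hugetlbfs_zero_partial_page() is subtle: masking
with ~huge_page_mask(h) keeps only the byte offset within the huge page,
and a masked end of 0 means the range ran exactly to the end of the page.
A minimal userspace sketch of that arithmetic, assuming a 2MB huge page
size (the macros and function below are illustrative only, not kernel API):

	#include <stdio.h>

	#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assume 2MB huge pages */
	#define HPAGE_MASK (~(HPAGE_SIZE - 1))	/* mirrors huge_page_mask(h) */

	/* Illustrative reimplementation of the offset math in
	 * hugetlbfs_zero_partial_page(); prints the in-page byte range
	 * that folio_zero_segment() would clear. */
	static void zero_range_within_page(unsigned long start, unsigned long end)
	{
		start &= ~HPAGE_MASK;	/* offset of start within its page */
		end &= ~HPAGE_MASK;	/* offset of end within the page */
		if (!end)		/* end was page aligned: go to page end */
			end = HPAGE_SIZE;
		printf("zero [%lu, %lu) within the page\n", start, end);
	}

	int main(void)
	{
		zero_range_within_page(1UL << 20, 2UL << 20); /* [1MB, 2MB) */
		zero_range_within_page(4UL << 20, 5UL << 20); /* [0, 1MB)  */
		return 0;
	}

The first call shows the `if (!end)` case: 2MB is page aligned, so the
masked end is 0 and the zeroing runs to the end of the page.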