diff --git a/include/linux/fs.h b/include/linux/fs.h
index 90a80de37ad4..894ff2451793 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -849,6 +849,12 @@ static inline void filemap_invalidate_unlock_shared(
 void lock_two_nondirectories(struct inode *, struct inode*);
 void unlock_two_nondirectories(struct inode *, struct inode*);
 
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+				 struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+				   struct address_space *mapping2);
+
+
 /*
  * NOTE: in a 32bit arch with a preemptable kernel and
  * an UP compile the i_size_read/write must be atomic
diff --git a/mm/filemap.c b/mm/filemap.c
index f7f9b87d2cd0..0fad08331cf4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1009,6 +1009,44 @@ struct page *__page_cache_alloc(gfp_t gfp)
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
+/*
+ * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
+ *
+ * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
+ *
+ * @mapping1: the first mapping to lock
+ * @mapping2: the second mapping to lock
+ */
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+				 struct address_space *mapping2)
+{
+	if (mapping1 > mapping2)
+		swap(mapping1, mapping2);
+	if (mapping1)
+		down_write(&mapping1->invalidate_lock);
+	if (mapping2 && mapping1 != mapping2)
+		down_write_nested(&mapping2->invalidate_lock, 1);
+}
+EXPORT_SYMBOL(filemap_invalidate_lock_two);
+
+/*
+ * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
+ *
+ * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
+ *
+ * @mapping1: the first mapping to unlock
+ * @mapping2: the second mapping to unlock
+ */
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+				   struct address_space *mapping2)
+{
+	if (mapping1)
+		up_write(&mapping1->invalidate_lock);
+	if (mapping2 && mapping1 != mapping2)
+		up_write(&mapping2->invalidate_lock);
+}
+EXPORT_SYMBOL(filemap_invalidate_unlock_two);
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of
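
A minimal usage sketch of the new helpers (the caller below is hypothetical; only filemap_invalidate_lock_two() and filemap_invalidate_unlock_two() come from this patch). A path that must invalidate the page cache of two files at once takes both invalidate_locks up front and drops them when done. NULL and identical mappings are tolerated, and the lock helper sorts the two mappings by address, taking the second with down_write_nested() subclass 1 to keep lockdep happy, so two concurrent callers locking the same pair in opposite order cannot ABBA-deadlock:

	/* Hypothetical caller, e.g. a remap/dedupe setup path. */
	static void example_invalidate_both(struct inode *src, struct inode *dst)
	{
		filemap_invalidate_lock_two(src->i_mapping, dst->i_mapping);
		/* ... truncate/invalidate the page cache of both inodes ... */
		filemap_invalidate_unlock_two(src->i_mapping, dst->i_mapping);
	}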