Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "10 fixes"

The lockdep hlist conversion is in the locking tree too, waiting for the
next merge window.  Andrew thought it should go in now.  I'll take it,
since it fixes a real problem and looks trivially correct (famous last
words).

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  arch/x86/Kconfig: CONFIG_X86_UV should depend on CONFIG_EFI
  mm: fix pfn_t vs highmem
  kernel/locking/lockdep.c: convert hash tables to hlists
  mm,thp: fix spellos in describing __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
  mm,thp: khugepaged: call pte flush at the time of collapse
  mm/backing-dev.c: fix error path in wb_init()
  mm, dax: check for pmd_none() after split_huge_pmd()
  vsprintf: kptr_restrict is okay in IRQ when 2
  mm: fix filemap.c kernel doc warning
  ubsan: cosmetic fix to Kconfig text
Linus Torvalds 2016-02-12 13:12:27 -08:00
commit 29f1bf34ed
13 changed files with 61 additions and 56 deletions
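
For context, the lockdep change in this merge is a mechanical conversion of its two hash tables from list_head buckets to hlist_head buckets. A minimal sketch of the resulting pattern, using the same kernel helpers that appear in the diff below (the example_* names are invented for illustration, not taken from the patch):

#include <linux/hash.h>
#include <linux/list.h>
#include <linux/rculist.h>

#define EXAMPLE_HASH_BITS 4
#define EXAMPLE_HASH_SIZE (1UL << EXAMPLE_HASH_BITS)

/* an hlist_head is a single pointer, half the size of a list_head */
static struct hlist_head example_table[EXAMPLE_HASH_SIZE];

struct example_entry {
	unsigned long key;
	struct hlist_node hash_entry;	/* was: struct list_head hash_entry; */
};

static void example_table_init(void)
{
	int i;

	for (i = 0; i < EXAMPLE_HASH_SIZE; i++)
		INIT_HLIST_HEAD(example_table + i);
}

static void example_add(struct example_entry *e)
{
	struct hlist_head *head = example_table + hash_long(e->key, EXAMPLE_HASH_BITS);

	/* head insertion replaces list_add_tail_rcu() */
	hlist_add_head_rcu(&e->hash_entry, head);
}

/* caller holds rcu_read_lock() */
static struct example_entry *example_lookup(unsigned long key)
{
	struct hlist_head *head = example_table + hash_long(key, EXAMPLE_HASH_BITS);
	struct example_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_entry)
		if (e->key == key)
			return e;
	return NULL;
}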


@@ -475,6 +475,7 @@ config X86_UV
depends on X86_64
depends on X86_EXTENDED_PLATFORM
depends on NUMA
depends on EFI
depends on X86_X2APIC
depends on PCI
---help---


@@ -66,7 +66,7 @@ struct lock_class {
/*
* class-hash:
*/
struct list_head hash_entry;
struct hlist_node hash_entry;
/*
* global list of all lock-classes:
@@ -199,7 +199,7 @@ struct lock_chain {
u8 irq_context;
u8 depth;
u16 base;
struct list_head entry;
struct hlist_node entry;
u64 chain_key;
};


@@ -10,7 +10,7 @@
* backing is indicated by flags in the high bits of the value.
*/
typedef struct {
unsigned long val;
u64 val;
} pfn_t;
#endif
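
The typedef change above is the core of the pfn_t-vs-highmem fix: with the value held in an unsigned long, a 32-bit kernel only has room for a 20-bit pfn below the flag bits, so pages above 4 GiB (highmem under PAE) cannot be represented. A small stand-alone illustration of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); the macros are re-declared locally just for the demonstration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* old layout: flags occupy the top PAGE_SHIFT bits of a 32-bit long */
	uint32_t old_flags_mask = (uint32_t)(PAGE_SIZE - 1) << (32 - PAGE_SHIFT);
	uint32_t old_max_pfn = ~old_flags_mask;		/* 0x000fffff */

	/* new layout: flags occupy the top PAGE_SHIFT bits of a u64 */
	uint64_t new_flags_mask = (uint64_t)(PAGE_SIZE - 1) << (64 - PAGE_SHIFT);
	uint64_t new_max_pfn = ~new_flags_mask;

	/* 0xfffff pages * 4 KiB = 4 GiB: anything above that was unrepresentable */
	printf("old max pfn: 0x%08x\n", old_max_pfn);
	printf("new max pfn: 0x%016llx\n", (unsigned long long)new_max_pfn);
	return 0;
}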


@@ -9,14 +9,13 @@
* PFN_DEV - pfn is not covered by system memmap by default
* PFN_MAP - pfn has a dynamic page mapping established by a device driver
*/
#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \
<< (BITS_PER_LONG - PAGE_SHIFT))
#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1))
#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2))
#define PFN_DEV (1UL << (BITS_PER_LONG - 3))
#define PFN_MAP (1UL << (BITS_PER_LONG - 4))
#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags)
static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
{
pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
@@ -29,7 +28,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
return __pfn_to_pfn_t(pfn, 0);
}
extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);
static inline bool pfn_t_has_page(pfn_t pfn)
{
@@ -87,7 +86,7 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
const unsigned long flags = PFN_DEV|PFN_MAP;
const u64 flags = PFN_DEV|PFN_MAP;
return (pfn.val & flags) == flags;
}


@@ -292,7 +292,7 @@ LIST_HEAD(all_lock_classes);
#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
static struct list_head classhash_table[CLASSHASH_SIZE];
static struct hlist_head classhash_table[CLASSHASH_SIZE];
/*
* We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
static struct list_head chainhash_table[CHAINHASH_SIZE];
static struct hlist_head chainhash_table[CHAINHASH_SIZE];
/*
* The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@ static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct list_head *hash_head;
struct hlist_head *hash_head;
struct lock_class *class;
#ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return NULL;
list_for_each_entry_rcu(class, hash_head, hash_entry) {
hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key) {
/*
* Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@ static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
struct lockdep_subclass_key *key;
struct list_head *hash_head;
struct hlist_head *hash_head;
struct lock_class *class;
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
* We have to do the hash-walk again, to avoid races
* with another CPU:
*/
list_for_each_entry_rcu(class, hash_head, hash_entry) {
hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key)
goto out_unlock_set;
}
@@ -805,7 +805,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
* We use RCU's safe list-add method to make
* parallel walking of the hash-list safe:
*/
list_add_tail_rcu(&class->hash_entry, hash_head);
hlist_add_head_rcu(&class->hash_entry, hash_head);
/*
* Add it to the global list of classes:
*/
@@ -2017,7 +2017,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
u64 chain_key)
{
struct lock_class *class = hlock_class(hlock);
struct list_head *hash_head = chainhashentry(chain_key);
struct hlist_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
struct held_lock *hlock_curr;
int i, j;
@@ -2033,7 +2033,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
* We can walk it lock-free, because entries only get added
* to the hash:
*/
list_for_each_entry_rcu(chain, hash_head, entry) {
hlist_for_each_entry_rcu(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
cache_hit:
debug_atomic_inc(chain_lookup_hits);
@@ -2057,7 +2057,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
list_for_each_entry(chain, hash_head, entry) {
hlist_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
graph_unlock();
goto cache_hit;
@@ -2091,7 +2091,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
}
chain_hlocks[chain->base + j] = class - lock_classes;
}
list_add_tail_rcu(&chain->entry, hash_head);
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains();
@@ -3875,7 +3875,7 @@ void lockdep_reset(void)
nr_process_chains = 0;
debug_locks = 1;
for (i = 0; i < CHAINHASH_SIZE; i++)
INIT_LIST_HEAD(chainhash_table + i);
INIT_HLIST_HEAD(chainhash_table + i);
raw_local_irq_restore(flags);
}
@@ -3894,7 +3894,7 @@ static void zap_class(struct lock_class *class)
/*
* Unhash the class and remove it from the all_lock_classes list:
*/
list_del_rcu(&class->hash_entry);
hlist_del_rcu(&class->hash_entry);
list_del_rcu(&class->lock_entry);
RCU_INIT_POINTER(class->key, NULL);
@@ -3917,7 +3917,7 @@ static inline int within(const void *addr, void *start, unsigned long size)
void lockdep_free_key_range(void *start, unsigned long size)
{
struct lock_class *class;
struct list_head *head;
struct hlist_head *head;
unsigned long flags;
int i;
int locked;
@@ -3930,9 +3930,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
*/
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
continue;
list_for_each_entry_rcu(class, head, hash_entry) {
hlist_for_each_entry_rcu(class, head, hash_entry) {
if (within(class->key, start, size))
zap_class(class);
else if (within(class->name, start, size))
@@ -3962,7 +3960,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
void lockdep_reset_lock(struct lockdep_map *lock)
{
struct lock_class *class;
struct list_head *head;
struct hlist_head *head;
unsigned long flags;
int i, j;
int locked;
@@ -3987,9 +3985,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
locked = graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
continue;
list_for_each_entry_rcu(class, head, hash_entry) {
hlist_for_each_entry_rcu(class, head, hash_entry) {
int match = 0;
for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4027,10 +4023,10 @@ void lockdep_init(void)
return;
for (i = 0; i < CLASSHASH_SIZE; i++)
INIT_LIST_HEAD(classhash_table + i);
INIT_HLIST_HEAD(classhash_table + i);
for (i = 0; i < CHAINHASH_SIZE; i++)
INIT_LIST_HEAD(chainhash_table + i);
INIT_HLIST_HEAD(chainhash_table + i);
lockdep_initialized = 1;
}


@@ -150,7 +150,7 @@ void devm_memunmap(struct device *dev, void *addr)
}
EXPORT_SYMBOL(devm_memunmap);
pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
{
return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}


@@ -18,6 +18,8 @@ config UBSAN_SANITIZE_ALL
This option activates instrumentation for the entire kernel.
If you don't enable this option, you have to explicitly specify
UBSAN_SANITIZE := y for the files/directories you want to check for UB.
Enabling this option will get kernel image size increased
significantly.
config UBSAN_ALIGNMENT
bool "Enable checking of pointers alignment"
@@ -25,5 +27,5 @@ config UBSAN_ALIGNMENT
default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
help
This option enables detection of unaligned memory accesses.
Enabling this option on architectures that support unalligned
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.


@@ -1590,22 +1590,23 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return buf;
}
case 'K':
/*
* %pK cannot be used in IRQ context because its test
* for CAP_SYSLOG would be meaningless.
*/
if (kptr_restrict && (in_irq() || in_serving_softirq() ||
in_nmi())) {
if (spec.field_width == -1)
spec.field_width = default_width;
return string(buf, end, "pK-error", spec);
}
switch (kptr_restrict) {
case 0:
/* Always print %pK values */
break;
case 1: {
const struct cred *cred;
/*
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
if (in_irq() || in_serving_softirq() || in_nmi()) {
if (spec.field_width == -1)
spec.field_width = default_width;
return string(buf, end, "pK-error", spec);
}
/*
* Only print the real pointer value if the current
* process has CAP_SYSLOG and is running with the
@@ -1615,8 +1616,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
* leak pointer values if a binary opens a file using
* %pK and then elevates privileges before reading it.
*/
const struct cred *cred = current_cred();
cred = current_cred();
if (!has_capability_noaudit(current, CAP_SYSLOG) ||
!uid_eq(cred->euid, cred->uid) ||
!gid_eq(cred->egid, cred->gid))
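
To summarise the new behaviour (a summary sketch, not part of the patch; example_log() and its argument are invented): only kptr_restrict == 1 inspects the credentials of current, so only that mode still has to refuse to run from IRQ/softirq/NMI context.

#include <linux/printk.h>

/*
 * %pK per kernel.kptr_restrict after this change:
 *   0 - pointer printed normally, from any context
 *   1 - real value only if current has CAP_SYSLOG and has not switched
 *       euid/egid; from IRQ/softirq/NMI context it still prints "pK-error"
 *   2 - pointer always printed as zeros, now accepted from any context
 */
static void example_log(const void *obj)
{
	pr_info("object at %pK\n", obj);
}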


@@ -328,7 +328,7 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
return 0;
out_destroy_stat:
while (--i)
while (i--)
percpu_counter_destroy(&wb->stat[i]);
fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
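
The one-character loop change above is the whole fix. A small stand-alone illustration (the function names and the count of 3 are invented) of what each unwind loop visits: "while (--i)" never destroys stat[0] and, had the very first percpu_counter_init() failed with i == 0, would have decremented past zero, whereas "while (i--)" tears down exactly the i counters that were set up.

#include <stdio.h>

static void unwind_old(int i)		/* while (--i): buggy */
{
	while (--i)
		printf("  destroy stat[%d]\n", i);	/* never reaches index 0 */
}

static void unwind_new(int i)		/* while (i--): fixed */
{
	while (i--)
		printf("  destroy stat[%d]\n", i);	/* i-1 down to 0 */
}

int main(void)
{
	puts("failure after 3 counters, old loop:");
	unwind_old(3);			/* destroys 2, 1 -- leaks stat[0] */
	puts("failure after 3 counters, new loop:");
	unwind_new(3);			/* destroys 2, 1, 0 */
	return 0;
}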


@@ -1890,6 +1890,7 @@ EXPORT_SYMBOL(generic_file_read_iter);
* page_cache_read - adds requested page to the page cache if not already there
* @file: file to read
* @offset: page index
* @gfp_mask: memory allocation flags
*
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.


@@ -160,9 +160,11 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
}
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE)
if (next - addr != HPAGE_PMD_SIZE) {
split_huge_pmd(vma, pmd, addr);
else {
if (pmd_none(*pmd))
continue;
} else {
int nr_ptes = change_huge_pmd(vma, pmd, addr,
newprot, prot_numa);


@@ -210,6 +210,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
}
}
split_huge_pmd(vma, old_pmd, old_addr);
if (pmd_none(*old_pmd))
continue;
VM_BUG_ON(pmd_trans_huge(*old_pmd));
}
if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
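
Together with the change_pmd_range() hunk further up, this applies one caller-side rule: on a DAX mapping, split_huge_pmd() may simply clear the pmd instead of leaving a populated pte page behind, so the caller must re-check pmd_none() before it walks the pte level. A condensed sketch of that pattern (walk_one_pmd() and its surroundings are invented for illustration, not kernel code):

#include <linux/mm.h>
#include <linux/huge_mm.h>

static void walk_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			 unsigned long addr)
{
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
		split_huge_pmd(vma, pmd, addr);
		/*
		 * For DAX the "split" zaps the devmap pmd rather than
		 * populating a pte page: nothing left to work on here.
		 */
		if (pmd_none(*pmd))
			return;
	}
	/* ... safe to operate on individual ptes from here on ... */
}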


@@ -90,9 +90,9 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
* ARCHes with special requirements for evicting THP backing TLB entries can
* implement this. Otherwise also, it can help optimize normal TLB flush in
* THP regime. stock flush_tlb_range() typically has optimization to nuke the
* entire TLB TLB if flush span is greater than a threshhold, which will
* entire TLB if flush span is greater than a threshold, which will
* likely be true for a single huge page. Thus a single thp flush will
* invalidate the entire TLB which is not desitable.
* invalidate the entire TLB which is not desirable.
* e.g. see arch/arc: flush_pmd_tlb_range
*/
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
@@ -195,7 +195,9 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
/* collapse entails shooting down ptes not pmd */
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
#endif