Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "EFI fixes, an FPU fix, a ticket spinlock boundary condition fix and
  two build fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Always restore_xinit_state() when use_eager_cpu()
  x86: Make cpu_tss available to external modules
  efi: Fix error handling in add_sysfs_runtime_map_entry()
  x86/spinlocks: Fix regression in spinlock contention detection
  x86/mm: Clean up types in xlate_dev_mem_ptr()
  x86/efi: Store upper bits of command line buffer address in ext_cmd_line_ptr
  efivarfs: Ensure VariableName is NUL-terminated

commit 3d54ac9e35

@@ -1109,6 +1109,8 @@ struct boot_params *make_boot_params(struct efi_config *c)
         if (!cmdline_ptr)
                 goto fail;
         hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
+        /* Fill in upper bits of command line address, NOP on 32 bit */
+        boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;

         hdr->ramdisk_image = 0;
         hdr->ramdisk_size = 0;
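
The boot protocol's cmd_line_ptr field is only 32 bits wide, so when the command line buffer lands above 4 GiB its upper address bits have to be stashed in boot_params->ext_cmd_line_ptr, which is what the hunk above does. The following is a minimal standalone sketch of that split in plain C (not kernel code; the address value and variable names are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical command line buffer placed above 4 GiB. */
        uint64_t cmdline_ptr = 0x123456789aULL;

        uint32_t cmd_line_ptr     = (uint32_t)cmdline_ptr;          /* low 32 bits  */
        uint32_t ext_cmd_line_ptr = (uint32_t)(cmdline_ptr >> 32);  /* high 32 bits */

        /* On 32-bit the address fits entirely in cmd_line_ptr and the
         * shifted value is 0, matching the "NOP on 32 bit" comment. */
        printf("cmd_line_ptr=0x%08x ext_cmd_line_ptr=0x%08x\n",
               cmd_line_ptr, ext_cmd_line_ptr);

        /* Recombining both halves yields the original 64-bit address. */
        return (((uint64_t)ext_cmd_line_ptr << 32) | cmd_line_ptr) == cmdline_ptr ? 0 : 1;
}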

@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
         struct __raw_tickets tmp = READ_ONCE(lock->tickets);

         tmp.head &= ~TICKET_SLOWPATH_FLAG;
-        return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
+        return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 #define arch_spin_is_contended arch_spin_is_contended
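
The contention check subtracts two narrow ticket values, and C's integer promotion turns that subtraction into an int. Once tail has wrapped around past head, the promoted difference goes negative and the comparison reports "not contended" even though waiters exist; casting back to __ticket_t keeps the arithmetic modular, which is the boundary condition the fix above restores. A small standalone demonstration, assuming 16-bit tickets and a TICKET_LOCK_INC of 2 (both are illustrative assumptions, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

typedef uint16_t __ticket_t;        /* assumption: 16-bit tickets (large NR_CPUS) */
#define TICKET_LOCK_INC 2           /* assumption: paravirt-style increment       */

int main(void)
{
        __ticket_t head = 0xfffc;   /* ticket counter just before the wrap */
        __ticket_t tail = 0x0004;   /* several waiters past the wrap       */

        /* Integer promotion: (tail - head) is computed as int and is negative,
         * so the "greater than" test wrongly reports no contention. */
        printf("promoted:  contended=%d (diff=%d)\n",
               (tail - head) > TICKET_LOCK_INC, tail - head);

        /* Truncating back to __ticket_t makes the subtraction modulo 2^16,
         * so the difference is the true number of outstanding tickets. */
        printf("truncated: contended=%d (diff=%u)\n",
               (__ticket_t)(tail - head) > TICKET_LOCK_INC,
               (unsigned)(__ticket_t)(tail - head));
        return 0;
}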

@@ -57,7 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
         .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
 };
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss);

 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
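
EXPORT_PER_CPU_SYMBOL_GPL() exposes a per-CPU symbol only to GPL-compatible modules, while the plain EXPORT_PER_CPU_SYMBOL() makes it visible to all modules; switching exports is what "Make cpu_tss available to external modules" in the merge summary refers to. A minimal sketch of the export pattern with a generic per-CPU variable (not the actual process.c context):

#include <linux/percpu.h>
#include <linux/export.h>

/* A per-CPU variable defined in core kernel code... */
DEFINE_PER_CPU(unsigned long, demo_counter);

/* ...exported so that any module, GPL or not, may reference it,
 * e.g. via this_cpu_read(demo_counter) or per_cpu(demo_counter, cpu). */
EXPORT_PER_CPU_SYMBOL(demo_counter);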

@@ -156,11 +156,13 @@ void flush_thread(void)
                 /* FPU state will be reallocated lazily at the first use. */
                 drop_fpu(tsk);
                 free_thread_xstate(tsk);
-        } else if (!used_math()) {
-                /* kthread execs. TODO: cleanup this horror. */
-                if (WARN_ON(init_fpu(tsk)))
-                        force_sig(SIGKILL, tsk);
-                user_fpu_begin();
+        } else {
+                if (!tsk_used_math(tsk)) {
+                        /* kthread execs. TODO: cleanup this horror. */
+                        if (WARN_ON(init_fpu(tsk)))
+                                force_sig(SIGKILL, tsk);
+                        user_fpu_begin();
+                }
                 restore_init_xstate();
         }
 }

@@ -351,18 +351,20 @@ int arch_ioremap_pmd_supported(void)
  */
 void *xlate_dev_mem_ptr(phys_addr_t phys)
 {
-        void *addr;
-        unsigned long start = phys & PAGE_MASK;
+        unsigned long start  = phys &  PAGE_MASK;
+        unsigned long offset = phys & ~PAGE_MASK;
+        unsigned long vaddr;

         /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
         if (page_is_ram(start >> PAGE_SHIFT))
                 return __va(phys);

-        addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
-        if (addr)
-                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+        vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+        /* Only add the offset on success and return NULL if the ioremap() failed: */
+        if (vaddr)
+                vaddr += offset;

-        return addr;
+        return (void *)vaddr;
 }

 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
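
The cleanup keeps the same behaviour but does the arithmetic on an unsigned long instead of OR-ing offset bits into a void pointer: the page-aligned start is mapped, and the in-page offset is added only when the mapping succeeded, so a failed ioremap_cache() still yields NULL. A standalone sketch of the mask/offset arithmetic, assuming a 4 KiB page size and a made-up mapping address in place of the real ioremap_cache() return value:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t phys   = 0xfed000a4ULL;     /* hypothetical device address   */
        uint64_t start  = phys &  PAGE_MASK; /* page-aligned base: 0xfed00000 */
        uint64_t offset = phys & ~PAGE_MASK; /* offset within page: 0xa4      */

        /* Pretend the cached mapping of [start, start + PAGE_SIZE) landed at
         * this virtual base; a real failure would yield 0/NULL, in which case
         * the offset must not be added. */
        uint64_t vaddr = 0xffffc90000000000ULL;

        if (vaddr)
                vaddr += offset;             /* point at the requested byte */

        printf("start=0x%llx offset=0x%llx vaddr=0x%llx\n",
               (unsigned long long)start, (unsigned long long)offset,
               (unsigned long long)vaddr);
        return 0;
}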

@@ -120,7 +120,8 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
         if (!entry) {
                 kset_unregister(map_kset);
-                return entry;
+                map_kset = NULL;
+                return ERR_PTR(-ENOMEM);
         }

         memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,

@@ -132,6 +133,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
         if (ret) {
                 kobject_put(&entry->kobj);
                 kset_unregister(map_kset);
+                map_kset = NULL;
                 return ERR_PTR(ret);
         }

@@ -195,8 +197,6 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj)
                 entry = *(map_entries + j);
                 kobject_put(&entry->kobj);
         }
-        if (map_kset)
-                kset_unregister(map_kset);
 out:
         return ret;
 }
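
The three hunks above belong to one fix: add_sysfs_runtime_map_entry() now reports failure through ERR_PTR() instead of returning a NULL-ish pointer, and it clears the shared map_kset after unregistering it, so the cleanup path in efi_runtime_map_init() no longer needs (and must not attempt) a second kset_unregister(). A condensed sketch of that error-handling pattern, with simplified, made-up function names and sizes rather than the driver's real code:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kobject.h>

static struct kset *map_kset;   /* assume earlier init code registered this */

/* Allocate one entry; on failure undo the kset exactly once and say why. */
static void *add_entry_sketch(void)
{
        void *entry = kzalloc(64, GFP_KERNEL);

        if (!entry) {
                kset_unregister(map_kset);
                map_kset = NULL;        /* caller must not unregister it again */
                return ERR_PTR(-ENOMEM);
        }
        return entry;
}

/* Caller side: distinguish a valid pointer from an encoded errno. */
static int init_sketch(void)
{
        void *entry = add_entry_sketch();

        if (IS_ERR(entry))
                return PTR_ERR(entry);  /* e.g. -ENOMEM; no extra cleanup here */
        /* ... use entry ... */
        kfree(entry);
        return 0;
}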

@@ -121,7 +121,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
         int len, i;
         int err = -ENOMEM;

-        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
         if (!entry)
                 return err;
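
The efivarfs change swaps kmalloc() for kzalloc() so the entry, including its VariableName buffer, starts out zero-filled; whatever portion of the name is copied in is then always followed by NUL characters, which is the guarantee named in "efivarfs: Ensure VariableName is NUL-terminated". A tiny userspace sketch of the difference, using calloc/malloc as stand-ins for kzalloc/kmalloc and a made-up buffer size:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_BUF_CHARS 8        /* hypothetical fixed-size name buffer */

int main(void)
{
        const uint16_t name16[] = { 'B', 'o', 'o', 't' };   /* no NUL included */

        /* calloc (like kzalloc) zero-fills the buffer, so the copied name is
         * always followed by NUL characters. */
        uint16_t *zeroed = calloc(NAME_BUF_CHARS, sizeof(uint16_t));
        /* malloc (like kmalloc) leaves the tail indeterminate, so nothing
         * guarantees a terminator after the copied characters. */
        uint16_t *raw = malloc(NAME_BUF_CHARS * sizeof(uint16_t));

        if (!zeroed || !raw)
                return 1;

        memcpy(zeroed, name16, sizeof(name16));
        memcpy(raw, name16, sizeof(name16));

        printf("terminator after zeroed copy: %u\n", (unsigned)zeroed[4]); /* always 0 */
        /* raw[4] is indeterminate; a consumer scanning for a NUL could run
         * past the real end of the name. */

        free(zeroed);
        free(raw);
        return 0;
}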