Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
 "Various cleanups and simplifications, none of them really stands out,
  they are all over the place"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/uaccess: Remove unused __addr_ok() macro
  x86/smpboot: Remove unused phys_id variable
  x86/mm/dump_pagetables: Remove the unused prev_pud variable
  x86/fpu: Move init_xstate_size() to __init section
  x86/cpu_entry_area: Move percpu_setup_debug_store() to __init section
  x86/mtrr: Remove unused variable
  x86/boot/compressed/64: Explain paging_prepare()'s return value
  x86/resctrl: Remove duplicate MSR_MISC_FEATURE_CONTROL definition
  x86/asm/suspend: Drop ENTRY from local data
  x86/hw_breakpoints, kprobes: Remove kprobes ifdeffery
  x86/boot: Save several bytes in decompressor
  x86/trap: Remove useless declaration
  x86/mm/tlb: Remove unused cpu variable
  x86/events: Mark expected switch-case fall-throughs
  x86/asm-prototypes: Remove duplicate include <asm/page.h>
  x86/kernel: Mark expected switch-case fall-throughs
  x86/insn-eval: Mark expected switch-case fall-through
  x86/platform/UV: Replace kmalloc() and memset() with k[cz]alloc() calls
  x86/e820: Replace kmalloc() + memcpy() with kmemdup()
commit bcd49c3dd1
arch/x86/boot/compressed/head_64.S
@@ -358,8 +358,11 @@ ENTRY(startup_64)
 	 * paging_prepare() sets up the trampoline and checks if we need to
 	 * enable 5-level paging.
 	 *
-	 * Address of the trampoline is returned in RAX.
-	 * Non zero RDX on return means we need to enable 5-level paging.
+	 * paging_prepare() returns a two-quadword structure which lands
+	 * into RDX:RAX:
+	 *   - Address of the trampoline is returned in RAX.
+	 *   - Non zero RDX means trampoline needs to enable 5-level
+	 *     paging.
 	 *
 	 * RSI holds real mode data and needs to be preserved across
 	 * this function call.
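The rewritten comment reflects the x86-64 SysV calling convention: an aggregate of up to two eightbytes returned by value comes back in the RAX/RDX pair, so the assembly caller can read both results straight out of those registers. A minimal C sketch of the convention (struct and field names here are illustrative, not the decompressor's actual definitions):

    /* Sketch: a 16-byte POD struct returned by value is delivered in
     * RAX (first eightbyte) and RDX (second eightbyte) on x86-64. */
    struct paging_config {
    	unsigned long trampoline_start;	/* lands in RAX */
    	unsigned long l5_required;	/* lands in RDX */
    };

    struct paging_config paging_prepare(void)
    {
    	struct paging_config pc = { 0 };

    	/* ... place the trampoline, probe the CPU for 5-level paging ... */
    	return pc;	/* the two quadwords land in RDX:RAX */
    }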
@@ -565,7 +568,7 @@ adjust_got:
 	 *
 	 * RDI contains the return address (might be above 4G).
 	 * ECX contains the base address of the trampoline memory.
-	 * Non zero RDX on return means we need to enable 5-level paging.
+	 * Non zero RDX means trampoline needs to enable 5-level paging.
 	 */
 ENTRY(trampoline_32bit_src)
 	/* Set up data and stack segments */
@@ -655,8 +658,6 @@ no_longmode:
 	.data
 gdt64:
 	.word	gdt_end - gdt
-	.long	0
-	.word	0
 	.quad	0
 gdt:
 	.word	gdt_end - gdt
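The bytes saved come from gdt64's layout: in 64-bit mode, lgdt takes a 10-byte pseudo-descriptor, a 2-byte limit followed by an 8-byte base, so the .word plus .quad pair is sufficient and the leftover .long/.word padding can go. As a C sketch of the structure lgdt expects (illustration only, not kernel code):

    #include <stdint.h>

    /* 64-bit lgdt operand: 10 packed bytes, mirroring
     * ".word gdt_end - gdt" followed by ".quad 0" above. */
    struct gdt_ptr {
    	uint16_t limit;	/* size of the GDT */
    	uint64_t base;	/* linear address of the GDT, patched at runtime */
    } __attribute__((packed));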
arch/x86/events/intel/core.c
@@ -4220,6 +4220,8 @@ __init int intel_pmu_init(void)
 
 	case INTEL_FAM6_CORE2_MEROM:
 		x86_add_quirk(intel_clovertown_quirk);
+		/* fall through */
+
 	case INTEL_FAM6_CORE2_MEROM_L:
 	case INTEL_FAM6_CORE2_PENRYN:
 	case INTEL_FAM6_CORE2_DUNNINGTON:
arch/x86/events/intel/lbr.c
@@ -931,6 +931,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 			ret = X86_BR_ZERO_CALL;
 			break;
 		}
+		/* fall through */
 	case 0x9a: /* call far absolute */
 		ret = X86_BR_CALL;
 		break;
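All the /* fall through */ additions in this series serve one purpose: GCC 7+'s -Wimplicit-fallthrough warning accepts a fall-through marker comment placed directly before the next case label as proof that the missing break is intentional. A standalone illustration (not kernel code):

    /* Build with: gcc -Wimplicit-fallthrough -c example.c
     * Remove the marker comment and GCC warns that case 1
     * may fall through into case 2. */
    int classify(int n)
    {
    	int flags = 0;

    	switch (n) {
    	case 1:
    		flags |= 1;
    		/* fall through */
    	case 2:
    		flags |= 2;
    		break;
    	}
    	return flags;
    }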
arch/x86/include/asm/asm-prototypes.h
@@ -7,7 +7,6 @@
 
 #include <asm-generic/asm-prototypes.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/special_insns.h>
 #include <asm/preempt.h>
arch/x86/include/asm/processor.h
@@ -742,7 +742,6 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
-void early_trap_pf_init(void);
 
 /* Defined in head.S */
 extern struct desc_ptr early_gdt_descr;
arch/x86/include/asm/uaccess.h
@@ -34,10 +34,7 @@ static inline void set_fs(mm_segment_t fs)
 }
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
-
 #define user_addr_max() (current->thread.addr_limit.seg)
-#define __addr_ok(addr) \
-	((unsigned long __force)(addr) < user_addr_max())
 
 /*
  * Test whether a block of memory is a valid user space address.
arch/x86/kernel/acpi/wakeup_32.S
@@ -90,7 +90,7 @@ ret_point:
 .data
 ALIGN
 ENTRY(saved_magic)	.long	0
-ENTRY(saved_eip)	.long	0
+saved_eip:		.long	0
 
 # saved registers
 saved_idt:	.long	0,0
arch/x86/kernel/acpi/wakeup_64.S
@@ -125,12 +125,12 @@ ENTRY(do_suspend_lowlevel)
 ENDPROC(do_suspend_lowlevel)
 
 .data
-ENTRY(saved_rbp)	.quad	0
-ENTRY(saved_rsi)	.quad	0
-ENTRY(saved_rdi)	.quad	0
-ENTRY(saved_rbx)	.quad	0
+saved_rbp:	.quad	0
+saved_rsi:	.quad	0
+saved_rdi:	.quad	0
+saved_rbx:	.quad	0
 
-ENTRY(saved_rip)	.quad	0
-ENTRY(saved_rsp)	.quad	0
+saved_rip:	.quad	0
+saved_rsp:	.quad	0
 
 ENTRY(saved_magic)	.quad	0
arch/x86/kernel/apic/io_apic.c
@@ -812,6 +812,7 @@ static int irq_polarity(int idx)
 		return IOAPIC_POL_HIGH;
 	case MP_IRQPOL_RESERVED:
 		pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
+		/* fall through */
 	case MP_IRQPOL_ACTIVE_LOW:
 	default: /* Pointless default required due to do gcc stupidity */
 		return IOAPIC_POL_LOW;
@@ -859,6 +860,7 @@ static int irq_trigger(int idx)
 		return IOAPIC_EDGE;
 	case MP_IRQTRIG_RESERVED:
 		pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
+		/* fall through */
 	case MP_IRQTRIG_LEVEL:
 	default: /* Pointless default required due to do gcc stupidity */
 		return IOAPIC_LEVEL;
arch/x86/kernel/cpu/cacheinfo.c
@@ -248,6 +248,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 	switch (leaf) {
 	case 1:
 		l1 = &l1i;
+		/* fall through */
 	case 0:
 		if (!l1->val)
 			return;
arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -296,7 +296,7 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 			unsigned long sizek)
 {
 	unsigned long hole_basek, hole_sizek;
-	unsigned long second_basek, second_sizek;
+	unsigned long second_sizek;
 	unsigned long range0_basek, range0_sizek;
 	unsigned long range_basek, range_sizek;
 	unsigned long chunk_sizek;
@@ -304,7 +304,6 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 
 	hole_basek = 0;
 	hole_sizek = 0;
-	second_basek = 0;
 	second_sizek = 0;
 	chunk_sizek = state->chunk_sizek;
 	gran_sizek = state->gran_sizek;
arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -33,13 +33,6 @@
 #define CREATE_TRACE_POINTS
 #include "pseudo_lock_event.h"
 
-/*
- * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
- * prefetcher state. Details about this register can be found in the MSR
- * tables for specific platforms found in Intel's SDM.
- */
-#define MSR_MISC_FEATURE_CONTROL	0x000001a4
-
 /*
  * The bits needed to disable hardware prefetching varies based on the
  * platform. During initialization we will discover which bits to use.
arch/x86/kernel/e820.c
@@ -671,21 +671,18 @@ __init void e820__reallocate_tables(void)
 	int size;
 
 	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries;
-	n = kmalloc(size, GFP_KERNEL);
+	n = kmemdup(e820_table, size, GFP_KERNEL);
 	BUG_ON(!n);
-	memcpy(n, e820_table, size);
 	e820_table = n;
 
 	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries;
-	n = kmalloc(size, GFP_KERNEL);
+	n = kmemdup(e820_table_kexec, size, GFP_KERNEL);
 	BUG_ON(!n);
-	memcpy(n, e820_table_kexec, size);
 	e820_table_kexec = n;
 
 	size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries;
-	n = kmalloc(size, GFP_KERNEL);
+	n = kmemdup(e820_table_firmware, size, GFP_KERNEL);
 	BUG_ON(!n);
-	memcpy(n, e820_table_firmware, size);
 	e820_table_firmware = n;
 }
 
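kmemdup(src, len, gfp) is exactly the kmalloc() + memcpy() pair folded into one call, so the allocation size and the copy length can no longer drift apart; it returns NULL on failure just like kmalloc(), which is why the BUG_ON(!n) checks stay. The equivalence, sketched with the variables from the hunk above:

    /* Before: allocate, assert, copy -- two places that must agree on 'size'. */
    n = kmalloc(size, GFP_KERNEL);
    BUG_ON(!n);
    memcpy(n, e820_table, size);

    /* After: one call with the same semantics. */
    n = kmemdup(e820_table, size, GFP_KERNEL);
    BUG_ON(!n);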
arch/x86/kernel/fpu/xstate.c
@@ -669,7 +669,7 @@ static bool is_supported_xstate_size(unsigned int test_xstate_size)
 	return false;
 }
 
-static int init_xstate_size(void)
+static int __init init_xstate_size(void)
 {
 	/* Recompute the context size for enabled features: */
 	unsigned int possible_xstate_size;
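__init places a function in the .init.text section, which the kernel frees once boot completes (the "Freeing unused kernel memory" line in dmesg), so the annotation is valid only because init_xstate_size() is called from the boot-time xstate setup path and never again. The general shape of the idiom (generic sketch, hypothetical function name):

    #include <linux/init.h>

    /* Lives in .init.text and is discarded after boot; calling it
     * later would jump into freed memory. */
    static int __init one_time_setup(void)
    {
    	/* ... work needed only during initialization ... */
    	return 0;
    }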
arch/x86/kernel/hw_breakpoint.c
@@ -261,12 +261,8 @@ static int arch_build_bp_info(struct perf_event *bp,
 	 * allow kernel breakpoints at all.
 	 */
 	if (attr->bp_addr >= TASK_SIZE_MAX) {
-#ifdef CONFIG_KPROBES
 		if (within_kprobe_blacklist(attr->bp_addr))
 			return -EINVAL;
-#else
-		return -EINVAL;
-#endif
 	}
 
 	hw->type = X86_BREAKPOINT_EXECUTE;
@@ -279,6 +275,7 @@ static int arch_build_bp_info(struct perf_event *bp,
 		hw->len = X86_BREAKPOINT_LEN_X;
 		return 0;
 	}
+	/* fall through */
 	default:
 		return -EINVAL;
 	}
arch/x86/kernel/kgdb.c
@@ -467,6 +467,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 		ptr = &remcomInBuffer[1];
 		if (kgdb_hex2long(&ptr, &addr))
 			linux_regs->ip = addr;
+		/* fall through */
 	case 'D':
 	case 'k':
 		/* clear the trace bit */
arch/x86/kernel/smpboot.c
@@ -150,7 +150,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
  */
 static void smp_callin(void)
 {
-	int cpuid, phys_id;
+	int cpuid;
 
 	/*
 	 * If waken up by an INIT in an 82489DX configuration
@@ -160,11 +160,6 @@ static void smp_callin(void)
 	 */
 	cpuid = smp_processor_id();
 
-	/*
-	 * (This works even if the APIC is not enabled.)
-	 */
-	phys_id = read_apic_id();
-
 	/*
 	 * the boot CPU has finished the init stage and is spinning
 	 * on callin_map until we finish. We are free to set up this
arch/x86/kernel/uprobes.c
@@ -745,6 +745,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
 		 * OPCODE1() of the "short" jmp which checks the same condition.
 		 */
 		opc1 = OPCODE2(insn) - 0x10;
+		/* fall through */
 	default:
 		if (!is_cond_jmp_opcode(opc1))
 			return -ENOSYS;
arch/x86/lib/insn-eval.c
@@ -179,6 +179,8 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
 		if (insn->addr_bytes == 2)
 			return -EINVAL;
 
+		/* fall through */
+
 	case -EDOM:
 	case offsetof(struct pt_regs, bx):
 	case offsetof(struct pt_regs, si):
arch/x86/mm/cpu_entry_area.c
@@ -52,7 +52,7 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
 	cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
 }
 
-static void percpu_setup_debug_store(int cpu)
+static void __init percpu_setup_debug_store(int cpu)
 {
 #ifdef CONFIG_CPU_SUP_INTEL
 	int npages;
arch/x86/mm/dump_pagetables.c
@@ -444,7 +444,6 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
 	int i;
 	pud_t *start, *pud_start;
 	pgprotval_t prot, eff;
-	pud_t *prev_pud = NULL;
 
 	pud_start = start = (pud_t *)p4d_page_vaddr(addr);
 
@@ -462,7 +461,6 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
 		} else
 			note_page(m, st, __pgprot(0), 0, 3);
 
-		prev_pud = start;
 		start++;
 	}
 }
arch/x86/mm/tlb.c
@@ -685,9 +685,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 		 * that UV should be updated so that smp_call_function_many(),
 		 * etc, are optimal on UV.
 		 */
-		unsigned int cpu;
-
-		cpu = smp_processor_id();
 		cpumask = uv_flush_tlb_others(cpumask, info);
 		if (cpumask)
 			smp_call_function_many(cpumask, flush_tlb_func_remote,
arch/x86/platform/uv/tlb_uv.c
@@ -2010,8 +2010,7 @@ static void make_per_cpu_thp(struct bau_control *smaster)
 	int cpu;
 	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
 
-	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
-	memset(smaster->thp, 0, hpsz);
+	smaster->thp = kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
 	for_each_present_cpu(cpu) {
 		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
 		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
@@ -2135,15 +2134,12 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
 static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
 {
 	unsigned char *uvhub_mask;
-	void *vp;
 	struct uvhub_desc *uvhub_descs;
 
 	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
 		timeout_us = calculate_destination_timeout();
 
-	vp = kmalloc_array(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
-	uvhub_descs = (struct uvhub_desc *)vp;
-	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+	uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
 	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
 
 	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
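Both UV conversions follow the same rule as the e820 change: kzalloc_node() is kmalloc_node() plus zeroing, and kcalloc(n, size, gfp) is a zeroing kmalloc_array(), overflow check on n * size included, so each allocate-then-memset pair collapses into one call and the void-pointer detour through vp disappears. The equivalence, sketched with the lines from the hunk above:

    /* Before: allocate, cast, zero by hand. */
    vp = kmalloc_array(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
    uvhub_descs = (struct uvhub_desc *)vp;
    memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));

    /* After: one call returning zeroed, overflow-checked memory. */
    uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);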
include/linux/kprobes.h
@@ -442,6 +442,11 @@ static inline int enable_kprobe(struct kprobe *kp)
 {
 	return -ENOSYS;
 }
+
+static inline bool within_kprobe_blacklist(unsigned long addr)
+{
+	return true;
+}
 #endif /* CONFIG_KPROBES */
 static inline int disable_kretprobe(struct kretprobe *rp)
 {
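This stub is what lets the arch_build_bp_info() hunk above drop its #ifdef block: with CONFIG_KPROBES=n, within_kprobe_blacklist() still exists and returns true, so every kernel address is refused as a breakpoint target, matching the old #else return -EINVAL branch. The header-stub idiom in general (a sketch):

    /* Give the 'feature disabled' configuration an inline stub so call
     * sites compile unconditionally instead of sprouting #ifdefs. */
    #ifdef CONFIG_KPROBES
    bool within_kprobe_blacklist(unsigned long addr);
    #else
    static inline bool within_kprobe_blacklist(unsigned long addr)
    {
    	return true;	/* no kprobes: all kernel addresses are off-limits */
    }
    #endif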