Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6

* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6:
  [PATCH] x86-64: Fix race in exit_idle
  [PATCH] x86-64: Fix vgetcpu when CONFIG_HOTPLUG_CPU is disabled
  [PATCH] x86: Add acpi_user_timer_override option for Asus boards
  [PATCH] x86-64: setup saved_max_pfn correctly (kdump)
  [PATCH] x86-64: Handle reserve_bootmem_generic beyond end_pfn
  [PATCH] x86-64: shorten the x86_64 boot setup GDT to what the comment says
  [PATCH] x86-64: Fix PTRACE_[SG]ET_THREAD_AREA regression with ia32 emulation.
  [PATCH] x86-64: Fix partial page check to ensure unusable memory is not being marked usable.
  Revert "[PATCH] MMCONFIG and new Intel motherboards"
commit f5ad1a785f
Author: Linus Torvalds
Date:   2006-11-14 15:23:17 -08:00
17 changed files with 86 additions and 75 deletions

View File

@ -164,6 +164,10 @@ and is between 256 and 4096 characters. It is defined in the file
acpi_skip_timer_override [HW,ACPI]
Recognize and ignore IRQ0/pin2 Interrupt Override.
For broken nForce2 BIOS resulting in XT-PIC timer.
acpi_use_timer_override [HW,ACPI]
Use timer override. For some broken Nvidia NF5 boards
that require a timer override, but don't have
HPET
acpi_dbg_layer= [HW,ACPI]
Format: <int>
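
A hedged usage note on the new acpi_use_timer_override entry above: the option takes no value, so enabling it is just a matter of appending it to the kernel command line. The kernel image path and root device below are placeholders, not part of the patch.

    kernel /boot/vmlinuz root=/dev/sda1 acpi_use_timer_override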

View File

@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict);
acpi_interrupt_flags acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg)
return 0;
}
early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
static int __init parse_acpi_use_timer_override(char *arg)
{
acpi_use_timer_override = 1;
return 0;
}
early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
#endif /* CONFIG_X86_IO_APIC */
static int __init setup_acpi_sci(char *s)

View File

@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device)
#ifdef CONFIG_ACPI
/* According to Nvidia all timer overrides are bogus unless HPET
is enabled. */
if (vendor == PCI_VENDOR_ID_NVIDIA) {
if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI "
"timer override.\n");
printk(KERN_INFO "If you got timer trouble "
"try acpi_use_timer_override\n");
}
}
#endif

View File

@ -836,13 +836,12 @@ gdt:
.word 0x9200 # data read/write
.word 0x00CF # granularity = 4096, 386
# (+5th nibble of limit)
gdt_end:
idt_48:
.word 0 # idt limit = 0
.word 0, 0 # idt base = 0L
gdt_48:
.word 0x8000 # gdt limit=2048,
# 256 GDT entries
.word gdt_end-gdt-1 # gdt limit
.word 0, 0 # gdt base (filled in later)
# Include video setup & detection code
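
A hedged worked example of the new limit expression, assuming a hypothetical table of four 8-byte descriptors (the x86 GDTR limit field holds the table size in bytes minus one):

    gdt_end - gdt     = 4 * 8 = 32 bytes
    gdt_end - gdt - 1 = 31 = 0x1f   -> value loaded as the GDT limit

The removed hard-coded 0x8000 advertised 2048 bytes (256 entries), far more than the table actually contains, which is what the patch title refers to.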

View File

@ -244,6 +244,8 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
case PTRACE_DETACH:
case PTRACE_SYSCALL:
case PTRACE_SETOPTIONS:
case PTRACE_SET_THREAD_AREA:
case PTRACE_GET_THREAD_AREA:
return sys_ptrace(request, pid, addr, data);
default:
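
For context, PTRACE_GET_THREAD_AREA and PTRACE_SET_THREAD_AREA operate on a struct user_desc selected by the TLS entry number passed in addr, which is why the patch forwards them to sys_ptrace like the other requests in this switch. A minimal sketch of a 32-bit tracer reading one TLS slot of an already-attached, stopped tracee; the pid handling and the entry number 6 are placeholders, and depending on the C library the request constant may need to be defined by hand:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <asm/ldt.h>                /* struct user_desc */

    /* Sketch: dump TLS descriptor 6 of an already-stopped tracee. */
    static int dump_tls(pid_t pid)
    {
            struct user_desc desc;

            if (ptrace(PTRACE_GET_THREAD_AREA, pid, (void *)6, &desc) == -1) {
                    perror("PTRACE_GET_THREAD_AREA");
                    return -1;
            }
            printf("entry %u: base=%#x limit=%#x\n",
                   desc.entry_number, desc.base_addr, desc.limit);
            return 0;
    }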

View File

@ -278,7 +278,7 @@ e820_register_active_regions(int nid, unsigned long start_pfn,
>> PAGE_SHIFT;
/* Skip map entries smaller than a page */
if (ei_startpfn > ei_endpfn)
if (ei_startpfn >= ei_endpfn)
continue;
/* Check if end_pfn_map should be updated */
@ -594,7 +594,9 @@ static int __init parse_memmap_opt(char *p)
* size before original memory map is
* reset.
*/
e820_register_active_regions(0, 0, -1UL);
saved_max_pfn = e820_end_of_ram();
remove_all_active_ranges();
#endif
end_pfn_map = 0;
e820.nr_map = 0;
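
A hedged worked example of the boundary case the stricter >= test in the first hunk catches; the addresses are made up, and it assumes ei_startpfn/ei_endpfn are the entry's start rounded up and its end rounded down to page boundaries, as computed just before the check:

    e820 entry: addr = 0x9f800, size = 0x800   (covers 0x9f800-0x9ffff, no full page)
    ei_startpfn = 0xa0000 >> PAGE_SHIFT = 0xa0
    ei_endpfn   = 0xa0000 >> PAGE_SHIFT = 0xa0
    old test: 0xa0 >  0xa0 -> false, the sub-page entry fell through
    new test: 0xa0 >= 0xa0 -> true,  the entry is skipped as intended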

View File

@ -45,7 +45,13 @@ static void nvidia_bugs(void)
/*
* All timer overrides on Nvidia are
* wrong unless HPET is enabled.
* Unfortunately that's not true on many Asus boards.
* We don't know yet how to detect this automatically, but
* at least allow a command line override.
*/
if (acpi_use_timer_override)
return;
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
@ -53,6 +59,8 @@ static void nvidia_bugs(void)
printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI "
"timer override.\n");
printk(KERN_INFO "If you got timer trouble "
"try acpi_use_timer_override\n");
}
#endif
/* RED-PEN skip them on mptables too? */

View File

@ -88,9 +88,8 @@ void enter_idle(void)
static void __exit_idle(void)
{
if (read_pda(isidle) == 0)
if (test_and_clear_bit_pda(0, isidle) == 0)
return;
write_pda(isidle, 0);
atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
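
A hedged sketch of the window the old read-then-write sequence left open, assuming __exit_idle() can also be reached from an interrupt arriving on the same CPU:

    task context                        interrupt on the same CPU
    ------------                        -------------------------
    read_pda(isidle) == 1
                                        read_pda(isidle) == 1
                                        write_pda(isidle, 0)
                                        IDLE_END notifiers run
    write_pda(isidle, 0)
    IDLE_END notifiers run a second time

With test_and_clear_bit_pda() (added to pda.h below) the test and the clear are a single btr instruction, so only one of the two paths can observe the flag set.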

View File

@ -376,9 +376,8 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
/* prevent preemption and reschedule on another processor */
int me = get_cpu();
if (cpu == me) {
WARN_ON(1);
put_cpu();
return -EBUSY;
return 0;
}
spin_lock_bh(&call_lock);
__smp_call_function_single(cpu, func, info, nonatomic, wait);

View File

@ -876,15 +876,6 @@ static struct irqaction irq0 = {
timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
};
static int __cpuinit
time_cpu_notifier(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned cpu = (unsigned long) hcpu;
if (action == CPU_ONLINE)
vsyscall_set_cpu(cpu);
return NOTIFY_DONE;
}
void __init time_init(void)
{
if (nohpet)
@ -925,8 +916,6 @@ void __init time_init(void)
vxtime.last_tsc = get_cycles_sync();
set_cyc2ns_scale(cpu_khz);
setup_irq(0, &irq0);
hotcpu_notifier(time_cpu_notifier, 0);
time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());
#ifndef CONFIG_SMP
time_init_gtod();

View File

@ -27,6 +27,9 @@
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <asm/vsyscall.h>
#include <asm/pgtable.h>
@ -243,32 +246,17 @@ static ctl_table kernel_root_table2[] = {
#endif
static void __cpuinit write_rdtscp_cb(void *info)
{
write_rdtscp_aux((unsigned long)info);
}
void __cpuinit vsyscall_set_cpu(int cpu)
/* Assume __initcall executes before all user space. Hopefully kmod
doesn't violate that. We'll find out if it does. */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
unsigned long *d;
unsigned long node = 0;
#ifdef CONFIG_NUMA
node = cpu_to_node[cpu];
#endif
if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) {
void *info = (void *)((node << 12) | cpu);
/* Can happen on preemptive kernel */
if (get_cpu() == cpu)
write_rdtscp_cb(info);
#ifdef CONFIG_SMP
else {
/* the notifier is unfortunately not executed on the
target CPU */
smp_call_function_single(cpu,write_rdtscp_cb,info,0,1);
}
#endif
put_cpu();
}
if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu);
/* Store cpu number in limit so that it can be loaded quickly
in user space in vgetcpu.
@ -280,6 +268,21 @@ void __cpuinit vsyscall_set_cpu(int cpu)
*d |= (node >> 4) << 48;
}
static void __cpuinit cpu_vsyscall_init(void *arg)
{
/* preemption should be already off */
vsyscall_set_cpu(raw_smp_processor_id());
}
static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
long cpu = (long)arg;
if (action == CPU_ONLINE)
smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
return NOTIFY_DONE;
}
static void __init map_vsyscall(void)
{
extern char __vsyscall_0;
@ -299,6 +302,8 @@ static int __init vsyscall_init(void)
#ifdef CONFIG_SYSCTL
register_sysctl_table(kernel_root_table2, 0);
#endif
on_each_cpu(cpu_vsyscall_init, NULL, 0, 1);
hotcpu_notifier(cpu_vsyscall_notifier, 0);
return 0;
}
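
The value programmed above packs the node number into the high bits and the cpu number into the low 12 bits, i.e. (node << 12) | cpu; the GDT-limit trick mentioned in the comment carries the same value for CPUs without RDTSCP. A minimal sketch of how user space could recover the packed value on RDTSCP-capable hardware; the helper name is made up:

    /* Sketch: recover cpu and node from TSC_AUX, which rdtscp returns in ecx. */
    static inline void decode_cpu_node(unsigned int *cpu, unsigned int *node)
    {
            unsigned int lo, hi, p;

            asm volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (p));
            (void)lo; (void)hi;     /* the TSC value itself is not needed here */
            *cpu  = p & 0xfff;      /* low 12 bits             */
            *node = p >> 12;        /* everything above bit 11 */
    }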

View File

@ -655,9 +655,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
int nid = phys_to_nid(phys);
#endif
unsigned long pfn = phys >> PAGE_SHIFT;
if (pfn >= end_pfn) {
/* This can happen with kdump kernels when accessing firmware
tables. */
if (pfn < end_pfn_map)
return;
printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
phys, len);
return;
}
/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
reserve_bootmem(phys, len);
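
A hedged illustration of the case the new early return handles, using made-up numbers for a kdump kernel whose usable memory (end_pfn) is far smaller than the firmware-described map (end_pfn_map):

    end_pfn     = 0x8000     (crash kernel's own RAM only)
    end_pfn_map = 0x100000   (full e820 map, 4 GB)
    reserve_bootmem_generic(0x7fef0000, len)  -> pfn = 0x7fef0
    pfn >= end_pfn but pfn < end_pfn_map      -> silently ignored rather than
                                                 passed to reserve_bootmem()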

View File

@ -163,37 +163,6 @@ static __init void unreachable_devices(void)
}
}
static __init void pci_mmcfg_insert_resources(void)
{
#define PCI_MMCFG_RESOURCE_NAME_LEN 19
int i;
struct resource *res;
char *names;
unsigned num_buses;
res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
pci_mmcfg_config_num, GFP_KERNEL);
if (!res) {
printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
return;
}
names = (void *)&res[pci_mmcfg_config_num];
for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
num_buses = pci_mmcfg_config[i].end_bus_number -
pci_mmcfg_config[i].start_bus_number + 1;
res->name = names;
snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u",
pci_mmcfg_config[i].pci_segment_group_number);
res->start = pci_mmcfg_config[i].base_address;
res->end = res->start + (num_buses << 20) - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
insert_resource(&iomem_resource, res);
names += PCI_MMCFG_RESOURCE_NAME_LEN;
}
}
void __init pci_mmcfg_init(int type)
{
int i;
@ -237,7 +206,6 @@ void __init pci_mmcfg_init(int type)
}
unreachable_devices();
pci_mmcfg_insert_resources();
raw_pci_ops = &pci_mmcfg;
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;

View File

@ -132,6 +132,7 @@ extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
#ifdef CONFIG_X86_IO_APIC
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
#endif
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }

View File

@ -163,6 +163,7 @@ extern u8 x86_acpiid_to_apicid[];
#define ARCH_HAS_POWER_INIT 1
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
#endif /*__KERNEL__*/

View File

@ -109,6 +109,15 @@ extern struct x8664_pda _proxy_pda;
#define sub_pda(field,val) pda_to_op("sub",field,val)
#define or_pda(field,val) pda_to_op("or",field,val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit,field) ({ \
int old__; \
asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
: "=r" (old__), "+m" (_proxy_pda.field) \
: "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
old__; \
})
#endif
#define PDA_STACKOFFSET (5*8)
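
For reference on the macro above: btr copies the addressed bit into the carry flag and clears it in memory, and sbbl %0,%0 then materializes the carry as 0 or -1, so the expression is non-zero exactly when the bit was set beforehand. A usage sketch mirroring the __exit_idle() change earlier in this commit:

    if (test_and_clear_bit_pda(0, isidle) == 0)
            return;         /* flag was already clear, nothing to do */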

View File

@ -59,8 +59,6 @@ extern seqlock_t xtime_lock;
extern int sysctl_vsyscall;
extern void vsyscall_set_cpu(int cpu);
#define ARCH_HAVE_XTIME_LOCK 1
#endif /* __KERNEL__ */