Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] optimize pagefaults a little
  [IA64] Fix section conflict of ia64_mlogbuf_finish
  [IA64] s/scalibility/scalability/
  [IA64] kdump on INIT needs multi-nodes sync-up (v.2)
  [IA64] wire up {signal,timer,event}fd syscalls
  [IA64] spelling fixes: arch/ia64/
commit 5b58e21a27
@@ -791,7 +791,7 @@ static __init int setup_additional_cpus(char *s)
 early_param("additional_cpus", setup_additional_cpus);
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
+ * cpu_possible_map should be static, it cannot change as CPUs
  * are onlined, or offlined. The reason is per-cpu data-structures
  * are allocated by some modules at init time, and dont expect to
  * do this dynamically on cpu arrival/departure.
@@ -156,24 +156,30 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	if (!kdump_on_init)
 		return NOTIFY_DONE;
 
-	if (val != DIE_INIT_MONARCH_ENTER &&
-	    val != DIE_INIT_SLAVE_ENTER &&
+	if (val != DIE_INIT_MONARCH_LEAVE &&
+	    val != DIE_INIT_SLAVE_LEAVE &&
+	    val != DIE_INIT_MONARCH_PROCESS &&
 	    val != DIE_MCA_RENDZVOUS_LEAVE &&
 	    val != DIE_MCA_MONARCH_LEAVE)
 		return NOTIFY_DONE;
 
 	nd = (struct ia64_mca_notify_die *)args->err;
-	/* Reason code 1 means machine check rendezous*/
-	if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
-	     nd->sos->rv_rc == 1)
+	/* Reason code 1 means machine check rendezvous*/
+	if ((val == DIE_INIT_MONARCH_LEAVE || val == DIE_INIT_SLAVE_LEAVE
+	    || val == DIE_INIT_MONARCH_PROCESS) && nd->sos->rv_rc == 1)
 		return NOTIFY_DONE;
 
 	switch (val) {
-	case DIE_INIT_MONARCH_ENTER:
+	case DIE_INIT_MONARCH_PROCESS:
+		atomic_set(&kdump_in_progress, 1);
+		*(nd->monarch_cpu) = -1;
+		break;
+	case DIE_INIT_MONARCH_LEAVE:
 		machine_kdump_on_init();
 		break;
-	case DIE_INIT_SLAVE_ENTER:
-		unw_init_running(kdump_cpu_freeze, NULL);
+	case DIE_INIT_SLAVE_LEAVE:
+		if (atomic_read(&kdump_in_progress))
+			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_RENDZVOUS_LEAVE:
 		if (atomic_read(&kdump_in_progress))
@@ -215,8 +221,10 @@ static ctl_table sys_table[] = {
 static int
 machine_crash_setup(void)
 {
+	/* be notified before default_monarch_init_process */
 	static struct notifier_block kdump_init_notifier_nb = {
 		.notifier_call = kdump_init_notifier,
+		.priority = 1,
 	};
 	int ret;
 	if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
@@ -1585,5 +1585,8 @@ sys_call_table:
 	data8 sys_getcpu
 	data8 sys_epoll_pwait		// 1305
 	data8 sys_utimensat
+	data8 sys_signalfd
+	data8 sys_timerfd
+	data8 sys_eventfd
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
@@ -4,7 +4,7 @@
  * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  *
  * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
+ * asking for different IRQs should be done through these routines
  * instead of just grabbing them. Thus setups with different IRQ numbers
  * shouldn't result in any weird surprises, and installing new handlers
  * should be easier.
@@ -12,7 +12,7 @@
  * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
  *
  * 4/14/2004: Added code to handle cpu migration and do safe irq
- *		migration without lossing interrupts for iosapic
+ *		migration without losing interrupts for iosapic
  *		architecture.
  */
 
@@ -190,7 +190,7 @@ void fixup_irqs(void)
 	}
 
 	/*
-	 * Phase 1: Locate irq's bound to this cpu and
+	 * Phase 1: Locate IRQs bound to this cpu and
 	 * relocate them for cpu removal.
 	 */
 	migrate_irqs();
@@ -23,7 +23,7 @@ lsapic_noop_startup (unsigned int irq)
 static void
 lsapic_noop (unsigned int irq)
 {
-	/* nuthing to do... */
+	/* nothing to do... */
 }
 
 static int lsapic_retrigger(unsigned int irq)
@@ -151,12 +151,12 @@ static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
 
 	cmp_inst.l = kprobe_inst;
 	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
-		/* Integere compare - Register Register (A6 type)*/
+		/* Integer compare - Register Register (A6 type)*/
 		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
 				&&(cmp_inst.f.c == 1))
 			ctype_unc = 1;
 	} else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {
-		/* Integere compare - Immediate Register (A8 type)*/
+		/* Integer compare - Immediate Register (A8 type)*/
 		if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))
 			ctype_unc = 1;
 	}
@@ -820,7 +820,7 @@ static int __kprobes post_kprobes_handler(struct pt_regs *regs)
 	return 1;
 }
 
-static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -904,13 +904,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		if (post_kprobes_handler(args->regs))
 			ret = NOTIFY_STOP;
 		break;
-	case DIE_PAGE_FAULT:
-		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
-		if (kprobe_running() &&
-			kprobes_fault_handler(args->regs, args->trapnr))
-			ret = NOTIFY_STOP;
-		preempt_enable();
 	default:
 		break;
 	}
@@ -954,7 +947,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	/*
 	 * Callee owns the argument space and could overwrite it, eg
 	 * tail call optimization. So to be absolutely safe
-	 * we save the argument space before transfering the control
+	 * we save the argument space before transferring the control
 	 * to instrumented jprobe function which runs in
 	 * the process context
 	 */
@@ -273,7 +273,6 @@ static void ia64_mlogbuf_finish(int wait)
 
 	mlogbuf_finished = 1;
 }
-EXPORT_SYMBOL(ia64_mlogbuf_finish);
 
 /*
  * Print buffered messages from INIT context.
@@ -1477,6 +1476,10 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
 	struct task_struct *g, *t;
 	if (val != DIE_INIT_MONARCH_PROCESS)
 		return NOTIFY_DONE;
+#ifdef CONFIG_KEXEC
+	if (atomic_read(&kdump_in_progress))
+		return NOTIFY_DONE;
+#endif
 
 	/*
 	 * FIXME: mlogbuf will brim over with INIT stack dumps.
@@ -438,7 +438,7 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
  * @peidx: pointer of index of processor error section
  *
  * Return value:
- *	target address on Success / 0 on Failue
+ *	target address on Success / 0 on Failure
  */
 static u64
 get_target_identifier(peidx_table_t *peidx)
@@ -701,7 +701,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
 		return fatal_mca("External bus check fatal status");
 
 	/*
-	 * This is a local MCA and estimated as a recoverble error.
+	 * This is a local MCA and estimated as a recoverable error.
 	 */
 	if (platform)
 		return recover_from_platform_error(slidx, peidx, pbci, sos);
@@ -861,7 +861,7 @@ apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 /*
  * Modules contain a single unwind table which covers both the core and the init text
  * sections but since the two are not contiguous, we need to split this table up such that
- * we can register (and unregister) each "segment" seperately. Fortunately, this sounds
+ * we can register (and unregister) each "segment" separately. Fortunately, this sounds
  * more complicated than it really is.
  */
 static void
@@ -1318,7 +1318,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
 	unsigned long flags;
 	/*
-	 * validy checks on cpu_mask have been done upstream
+	 * validity checks on cpu_mask have been done upstream
 	 */
 	LOCK_PFS(flags);
 
@@ -1384,7 +1384,7 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 {
 	unsigned long flags;
 	/*
-	 * validy checks on cpu_mask have been done upstream
+	 * validity checks on cpu_mask have been done upstream
 	 */
 	LOCK_PFS(flags);
 
@@ -1835,7 +1835,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 	/*
 	 * remove our file from the async queue, if we use this mode.
 	 * This can be done without the context being protected. We come
-	 * here when the context has become unreacheable by other tasks.
+	 * here when the context has become unreachable by other tasks.
 	 *
 	 * We may still have active monitoring at this point and we may
 	 * end up in pfm_overflow_handler(). However, fasync_helper()
@@ -2132,7 +2132,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	filp->private_data = NULL;
 
 	/*
-	 * if we free on the spot, the context is now completely unreacheable
+	 * if we free on the spot, the context is now completely unreachable
 	 * from the callers side. The monitored task side is also cut, so we
 	 * can freely cut.
 	 *
@@ -2562,7 +2562,7 @@ pfm_reset_pmu_state(pfm_context_t *ctx)
 	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
 
 	/*
-	 * bitmask of all PMDs that are accesible to this context
+	 * bitmask of all PMDs that are accessible to this context
 	 */
 	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
 
@@ -3395,7 +3395,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
 		/*
 		 * we can only read the register that we use. That includes
-		 * the one we explicitely initialize AND the one we want included
+		 * the one we explicitly initialize AND the one we want included
 		 * in the sampling buffer (smpl_regs).
 		 *
 		 * Having this restriction allows optimization in the ctxsw routine
@@ -3715,7 +3715,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * if non-blocking, then we ensure that the task will go into
 	 * pfm_handle_work() before returning to user mode.
 	 *
-	 * We cannot explicitely reset another task, it MUST always
+	 * We cannot explicitly reset another task, it MUST always
 	 * be done by the task itself. This works for system wide because
 	 * the tool that is controlling the session is logically doing
 	 * "self-monitoring".
@@ -4644,7 +4644,7 @@ pfm_exit_thread(struct task_struct *task)
 	switch(state) {
 		case PFM_CTX_UNLOADED:
 			/*
-			 * only comes to thios function if pfm_context is not NULL, i.e., cannot
+			 * only comes to this function if pfm_context is not NULL, i.e., cannot
 			 * be in unloaded state
 			 */
 			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
@@ -5247,7 +5247,7 @@ pfm_end_notify_user(pfm_context_t *ctx)
 
 /*
  * main overflow processing routine.
- * it can be called from the interrupt path or explicitely during the context switch code
+ * it can be called from the interrupt path or explicitly during the context switch code
  */
 static void
 pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
@@ -181,7 +181,7 @@ static pmu_config_t pmu_conf_mck={
 	.pmc_desc = pfm_mck_pmc_desc,
 	.num_ibrs = 8,
 	.num_dbrs = 8,
-	.use_rr_dbregs = 1 /* debug register are use for range retrictions */
+	.use_rr_dbregs = 1 /* debug register are use for range restrictions */
 };
 
 
@@ -134,7 +134,7 @@ set_smp_redirect (int flag)
 	 * interrupt redirection. The reason is this would require that
 	 * All interrupts be stopped and hard bind the irq to a cpu.
 	 * Later when the interrupt is fired we need to set the redir hint
-	 * on again in the vector. This is combersome for something that the
+	 * on again in the vector. This is cumbersome for something that the
 	 * user mode irq balancer will solve anyways.
 	 */
 	no_int_routing=1;
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(data_saved_lock);
 /** salinfo_platform_oemdata - optional callback to decode oemdata from an error
  * record.
  * @sect_header: pointer to the start of the section to decode.
- * @oemdata: returns vmalloc area containing the decded output.
+ * @oemdata: returns vmalloc area containing the decoded output.
  * @oemdata_size: returns length of decoded output (strlen).
  *
  * Description: If user space asks for oem data to be decoded by the kernel
@@ -576,7 +576,7 @@ setup_arch (char **cmdline_p)
 }
 
 /*
- * Display cpu info for all cpu's.
+ * Display cpu info for all CPUs.
  */
 static int
 show_cpuinfo (struct seq_file *m, void *v)
@@ -761,7 +761,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	c->cpu = smp_processor_id();
 
 	/* below default values will be overwritten by identify_siblings()
-	 * for Multi-Threading/Multi-Core capable cpu's
+	 * for Multi-Threading/Multi-Core capable CPUs
 	 */
 	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
 	c->socket_id = -1;
@@ -947,7 +947,7 @@ cpu_init (void)
 	ia32_cpu_init();
 #endif
 
-	/* Clear ITC to eliminiate sched_clock() overflows in human time. */
+	/* Clear ITC to eliminate sched_clock() overflows in human time. */
 	ia64_set_itc(0);
 
 	/* disable all local interrupt sources: */
@@ -186,7 +186,7 @@ handle_IPI (int irq, void *dev_id)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
 */
 static inline void
 send_IPI_single (int dest_cpu, int op)
@@ -196,7 +196,7 @@ send_IPI_single (int dest_cpu, int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
 */
 static inline void
 send_IPI_allbutself (int op)
@@ -210,7 +210,7 @@ send_IPI_allbutself (int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
 */
 static inline void
 send_IPI_all (int op)
@@ -223,7 +223,7 @@ send_IPI_all (int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
 */
 static inline void
 send_IPI_self (int op)
@@ -252,7 +252,7 @@ kdump_smp_send_init(void)
 }
 #endif
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
 */
 void
 smp_send_reschedule (int cpu)
@@ -261,7 +261,7 @@ smp_send_reschedule (int cpu)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
 */
 static void
 smp_send_local_flush_tlb (int cpu)
@@ -694,7 +694,7 @@ int migrate_platform_irqs(unsigned int cpu)
 		set_cpei_target_cpu(new_cpei_cpu);
 		desc = irq_desc + ia64_cpe_irq;
 		/*
-		 * Switch for now, immediatly, we need to do fake intr
+		 * Switch for now, immediately, we need to do fake intr
 		 * as other interrupts, but need to study CPEI behaviour with
 		 * polling before making changes.
 		 */
@@ -840,7 +840,7 @@ __cpu_up (unsigned int cpu)
 }
 
 /*
- * Assume that CPU's have been discovered by some platform-dependent interface. For
+ * Assume that CPUs have been discovered by some platform-dependent interface. For
  * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
@@ -854,7 +854,7 @@ init_smp_config(void)
 	} *ap_startup;
 	long sal_ret;
 
-	/* Tell SAL where to drop the AP's. */
+	/* Tell SAL where to drop the APs. */
 	ap_startup = (struct fptr *) start_ap;
 	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
 		ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
@@ -304,7 +304,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
 	 * Lower 4 bits are used as a count. Upper bits are a sequence
 	 * number that is updated when count is reset. The cmpxchg will
 	 * fail is seqno has changed. This minimizes mutiple cpus
-	 * reseting the count.
+	 * resetting the count.
 	 */
 	if (current_jiffies > last.time)
 		(void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
@@ -2,7 +2,7 @@
 * Copyright (C) 1999-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
- *	- Change pt_regs_off() to make it less dependant on pt_regs structure.
+ *	- Change pt_regs_off() to make it less dependent on pt_regs structure.
 */
 /*
 * This file implements call frame unwind support for the Linux
@@ -317,7 +317,7 @@ static void __meminit scatter_node_data(void)
 	 * node_online_map is not set for hot-added nodes at this time,
 	 * because we are halfway through initialization of the new node's
 	 * structures. If for_each_online_node() is used, a new node's
-	 * pg_data_ptrs will be not initialized. Insted of using it,
+	 * pg_data_ptrs will be not initialized. Instead of using it,
 	 * pgdat_list[] is checked.
 	 */
 	for_each_node(node) {
@@ -19,36 +19,24 @@
 extern void die (char *, struct pt_regs *, long);
 
 #ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
 {
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
+	int ret = 0;
+
+	if (!user_mode(regs)) {
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() && kprobes_fault_handler(regs, trap))
+			ret = 1;
+		preempt_enable();
+	}
 
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
-
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.str = str,
-		.err = err,
-		.trapnr = trap,
-		.signr = sig
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+	return ret;
 }
 #else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
 {
-	return NOTIFY_DONE;
+	return 0;
 }
 #endif
 
@@ -117,8 +105,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	/*
 	 * This is to handle the kprobes on user space access instructions
 	 */
-	if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
-		SIGSEGV) == NOTIFY_STOP)
+	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
 	down_read(&mm->mmap_sem);
@@ -63,7 +63,7 @@ static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode)
 * Use the block transfer engine to move kernel memory from src to dest
 * using the assigned mode.
 *
- * Paramaters:
+ * Parameters:
 *   src - physical address of the transfer source.
 *   dest - physical address of the transfer destination.
 *   len - number of bytes to transfer from source to dest.
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(bte_copy);
 * use the block transfer engine to move kernel
 * memory from src to dest using the assigned mode.
 *
- * Paramaters:
+ * Parameters:
 *   src - physical address of the transfer source.
 *   dest - physical address of the transfer destination.
 *   len - number of bytes to transfer from source to dest.
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(bte_copy);
 * for IBCT0/1 in the SGI documentation.
 *
 * NOTE: If the source, dest, and len are all cache line aligned,
- * then it would be _FAR_ preferrable to use bte_copy instead.
+ * then it would be _FAR_ preferable to use bte_copy instead.
 */
 bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 {
@@ -300,7 +300,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 	 * a standard bte copy.
 	 *
 	 * One nasty exception to the above rule is when the
-	 * source and destination are not symetrically
+	 * source and destination are not symmetrically
 	 * mis-aligned. If the source offset from the first
 	 * cache line is different from the destination offset,
 	 * we make the first section be the entire transfer
@@ -337,7 +337,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 
 		if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
 			/*
-			 * We have two contigous bcopy
+			 * We have two contiguous bcopy
 			 * blocks. Merge them.
 			 */
 			headBcopyLen += footBcopyLen;
@@ -375,7 +375,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
 	} else {
 
 		/*
-		 * The transfer is not symetric, we will
+		 * The transfer is not symmetric, we will
 		 * allocate a buffer large enough for all the
 		 * data, bte_copy into that buffer and then
 		 * bcopy to the destination.
@@ -105,7 +105,7 @@ int shub1_bte_error_handler(unsigned long _nodepda)
 	}
 
 	BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
-	/* Reenable both bte interfaces */
+	/* Re-enable both bte interfaces */
 	imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
 	imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
 	REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
@@ -243,7 +243,7 @@ bte_crb_error_handler(cnodeid_t cnode, int btenum,
 
 	/*
 	 * The caller has already figured out the error type, we save that
-	 * in the bte handle structure for the thread excercising the
+	 * in the bte handle structure for the thread exercising the
 	 * interface to consume.
 	 */
 	bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
@@ -479,7 +479,7 @@ sn_io_early_init(void)
 	}
 
 	/*
-	 * prime sn_pci_provider[]. Individial provider init routines will
+	 * prime sn_pci_provider[]. Individual provider init routines will
 	 * override their respective default entries.
 	 */
 
@@ -167,7 +167,7 @@ void __init early_sn_setup(void)
 * IO on SN2 is done via SAL calls, early_printk won't work without this.
 *
 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
- * Any changes to those file may have to be made hereas well.
+ * Any changes to those file may have to be made here as well.
 */
 	efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
 	config_tables = __va(efi_systab->tables);
@@ -104,7 +104,7 @@ static inline unsigned long wait_piowc(void)
 *
 * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
 * Context switching user threads which have memory-mapped MMIO may cause
- * PIOs to issue from seperate CPUs, thus the PIO writes must be drained
+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained
 * from the previous CPU's Shub before execution resumes on the new CPU.
 */
 void sn_migrate(struct task_struct *task)
@@ -293,7 +293,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
 
 
 /*
- * Pull the remote per partititon specific variables from the specified
+ * Pull the remote per partition specific variables from the specified
 * partition.
 */
 enum xpc_retval
@@ -461,7 +461,7 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
 	// >>> iterations of the for-loop, bail if set?
 
-	// >>> should we impose a minumum #of entries? like 4 or 8?
+	// >>> should we impose a minimum #of entries? like 4 or 8?
 	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
@@ -514,7 +514,7 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 	// >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
 	// >>> iterations of the for-loop, bail if set?
 
-	// >>> should we impose a minumum #of entries? like 4 or 8?
+	// >>> should we impose a minimum #of entries? like 4 or 8?
 	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
 
 		nbytes = nentries * ch->msg_size;
@@ -1478,7 +1478,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 
 
 	/*
-	 * Before proceding with the teardown we have to wait until all
+	 * Before proceeding with the teardown we have to wait until all
 	 * existing references cease.
 	 */
 	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
@@ -531,7 +531,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
 
 	/*
-	 * If we wanted to allow promiscous mode to work like an
+	 * If we wanted to allow promiscuous mode to work like an
 	 * unswitched network, this would be a good point to OR in a
 	 * mask of partitions which should be receiving all packets.
 	 */
@@ -333,7 +333,7 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
 	/*
 	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
 	 * around hw issues at the pci bus level. SGI proms older than
-	 * 4.10 don't implment this.
+	 * 4.10 don't implement this.
 	 */
 
 	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
@@ -348,7 +348,7 @@ int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
 	/*
 	 * If the above failed, retry using the SAL_PROBE call which should
 	 * be present in all proms (but which cannot work round PCI chipset
-	 * bugs). This code is retained for compatability with old
+	 * bugs). This code is retained for compatibility with old
 	 * pre-4.10 proms, and should be removed at some point in the future.
 	 */
 
@@ -379,7 +379,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	/*
 	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
 	 * around hw issues at the pci bus level. SGI proms older than
-	 * 4.10 don't implment this.
+	 * 4.10 don't implement this.
 	 */
 
 	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
@@ -394,7 +394,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	/*
 	 * If the above failed, retry using the SAL_PROBE call which should
 	 * be present in all proms (but which cannot work round PCI chipset
-	 * bugs). This code is retained for compatability with old
+	 * bugs). This code is retained for compatibility with old
 	 * pre-4.10 proms, and should be removed at some point in the future.
 	 */
 
@@ -30,7 +30,7 @@ static void mark_ate(struct ate_resource *ate_resource, int start, int number,
 
 /*
 * find_free_ate: Find the first free ate index starting from the given
- *	index for the desired consequtive count.
+ *	index for the desired consecutive count.
 */
 static int find_free_ate(struct ate_resource *ate_resource, int start,
 			 int count)
@@ -88,7 +88,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
 		return -1;
 
 	/*
-	 * Find the required number of free consequtive ates.
+	 * Find the required number of free consecutive ates.
 	 */
 	start_index =
 	    find_free_ate(ate_resource, ate_resource->lowest_free_index,
@@ -105,7 +105,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
 /*
 * Allocate "count" contiguous Bridge Address Translation Entries
 * on the specified bridge to be used for PCI to XTALK mappings.
- * Indices in rm map range from 1..num_entries. Indicies returned
+ * Indices in rm map range from 1..num_entries. Indices returned
 * to caller range from 0..num_entries-1.
 *
 * Return the start index on success, -1 on failure.
@@ -201,7 +201,7 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
 }
 
 /*
- * Wrapper routine for free'ing DMA maps
+ * Wrapper routine for freeing DMA maps
 * DMA mappings for Direct 64 and 32 do not have any DMA maps.
 */
 void
@@ -223,7 +223,7 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
 
 	/*
 	 * Scan all vga controllers on this bus making sure they all
-	 * suport FW. If not, return.
+	 * support FW. If not, return.
 	 */
 
 	list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
@@ -364,7 +364,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
 * @req_size: len (bytes) to map
 *
 * Map @paddr into CA address space using the GART mechanism. The mapped
- * dma_addr_t is guarenteed to be contiguous in CA bus space.
+ * dma_addr_t is guaranteed to be contiguous in CA bus space.
 */
 static dma_addr_t
 tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
@@ -526,7 +526,7 @@ tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
 		return 0;
 
 	/*
-	 * If card is 64 or 48 bit addresable, use a direct mapping. 32
+	 * If card is 64 or 48 bit addressable, use a direct mapping. 32
 	 * bit direct is so restrictive w.r.t. where the memory resides that
 	 * we don't use it even though CA has some support.
 	 */
@@ -256,9 +256,9 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce __iomem **base,
 * @ct_addr: the coretalk address to map
 * @len: number of bytes to map
 *
- * Given the addressing type, set up various paramaters that define the
+ * Given the addressing type, set up various parameters that define the
 * ATE pool to use. Search for a contiguous block of entries to cover the
- * length, and if enough resources exist, fill in the ATE's and construct a
+ * length, and if enough resources exist, fill in the ATEs and construct a
 * tioce_dmamap struct to track the mapping.
 */
 static u64
@@ -581,8 +581,8 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	 */
 	if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) {
 		/*
-		 * We have two options for 40-bit mappings: 16GB "super" ATE's
-		 * and 64MB "regular" ATE's. We'll try both if needed for a
+		 * We have two options for 40-bit mappings: 16GB "super" ATEs
+		 * and 64MB "regular" ATEs. We'll try both if needed for a
		 * given mapping but which one we try first depends on the
		 * size. For requests >64MB, prefer to use a super page with
		 * regular as the fallback. Otherwise, try in the reverse order.
@@ -687,8 +687,8 @@ tioce_error_intr_handler(int irq, void *arg)
 }
 
 /**
- * tioce_reserve_m32 - reserve M32 ate's for the indicated address range
- * @tioce_kernel: TIOCE context to reserve ate's for
+ * tioce_reserve_m32 - reserve M32 ATEs for the indicated address range
+ * @tioce_kernel: TIOCE context to reserve ATEs for
 * @base: starting bus address to reserve
 * @limit: last bus address to reserve
 *
@@ -763,7 +763,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
 
 	/*
 	 * Set PMU pagesize to the largest size available, and zero out
-	 * the ate's.
+	 * the ATEs.
	 */
 
 	tioce_mmr = (struct tioce __iomem *)tioce_common->ce_pcibus.bs_base;
@@ -784,7 +784,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
 	}
 
 	/*
-	 * Reserve ATE's corresponding to reserved address ranges. These
+	 * Reserve ATEs corresponding to reserved address ranges. These
	 * include:
	 *
	 * Memory space covered by each PPB mem base/limit register
@@ -28,14 +28,24 @@
 */
 #include <linux/notifier.h>
 
-extern int register_page_fault_notifier(struct notifier_block *);
-extern int unregister_page_fault_notifier(struct notifier_block *);
+/*
+ * These are only here because kprobes.c wants them to implement a
+ * blatant layering violation. Will hopefully go away soon once all
+ * architectures are updated.
+ */
+static inline int register_page_fault_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+static inline int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
 
 enum die_val {
 	DIE_BREAK = 1,
 	DIE_FAULT,
 	DIE_OOPS,
-	DIE_PAGE_FAULT,
 	DIE_MACHINE_HALT,
 	DIE_MACHINE_RESTART,
 	DIE_MCA_MONARCH_ENTER,
@@ -120,6 +120,7 @@ struct arch_specific_insn {
 	unsigned short slot;
 };
 
+extern int kprobes_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
 
@@ -296,11 +296,14 @@
 #define __NR_getcpu 1304
 #define __NR_epoll_pwait 1305
 #define __NR_utimensat 1306
+#define __NR_signalfd 1307
+#define __NR_timerfd 1308
+#define __NR_eventfd 1309
 
 #ifdef __KERNEL__
 
 
-#define NR_syscalls 283 /* length of syscall table */
+#define NR_syscalls 286 /* length of syscall table */
 
 #define __ARCH_WANT_SYS_RT_SIGACTION
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND