mirror of https://gitee.com/openkylin/linux.git
[PATCH] powerpc: Remove lppaca structure from the PACA
At present the lppaca - the structure shared with the iSeries hypervisor and phyp - is contained within the PACA, our own low-level per-cpu structure. This doesn't have to be so; the patch below removes it, making a separate array of lppaca structures.

This saves approximately 500*NR_CPUS bytes of image size and kernel memory, because we no longer need the aligning gap between the Linux and hypervisor portions of every PACA. On the other hand, it means an extra level of dereference in many accesses to the lppaca.

The patch also gets rid of several places where we assign the paca address to a local variable for no particular reason.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent e58c3495e6
commit 3356bb9f7b
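As a minimal sketch of the layout change described above (not part of the patch itself; the structure names, the reduced field set and the NR_CPUS value are illustrative stand-ins only), the lppaca stops being a 1kB-aligned member embedded in every paca_struct and becomes an entry in a standalone array that the PACA merely points at, which is where both the memory saving and the extra dereference come from:

#include <stdio.h>

#define NR_CPUS 4			/* illustrative value only */

struct lppaca {				/* simplified stand-in for the real struct */
	unsigned char shared_proc;
	unsigned char idle;
};

/* Before the patch: the lppaca is embedded in each PACA and aligned to
 * 1kB so it cannot cross a page boundary, which pads every PACA. */
struct old_paca {
	struct lppaca lppaca __attribute__((__aligned__(0x400)));
	/* ... other Linux-private per-cpu fields ... */
};

/* After the patch: one separate lppaca[] array; the PACA keeps only a
 * pointer, so every access costs one extra dereference. */
struct lppaca lppaca[NR_CPUS];

struct new_paca {
	struct lppaca *lppaca_ptr;	/* points at lppaca[cpu] */
	/* ... other Linux-private per-cpu fields ... */
} paca[NR_CPUS];

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		paca[cpu].lppaca_ptr = &lppaca[cpu];

	/* old style: paca[cpu].lppaca.idle = 1;
	 * new style: paca[cpu].lppaca_ptr->idle = 1; */
	paca[0].lppaca_ptr->idle = 1;

	printf("embedded: %zu bytes/cpu, pointer: %zu bytes/cpu\n",
	       sizeof(struct old_paca), sizeof(struct new_paca));
	return 0;
}

Comparing the two printed sizes illustrates where the quoted ~500*NR_CPUS byte saving comes from: the alignment padding forced by the embedded copy disappears once only a pointer remains in the PACA.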
@@ -135,7 +135,7 @@ int main(void)
 	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
 	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
-	DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
+	DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
 	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
 
 	DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
@@ -511,7 +511,8 @@ restore:
 	cmpdi	0,r5,0
 	beq	4f
 	/* Check for pending interrupts (iSeries) */
-	ld	r3,PACALPPACA+LPPACAANYINT(r13)
+	ld	r3,PACALPPACAPTR(r13)
+	ld	r3,LPPACAANYINT(r3)
 	cmpdi	r3,0
 	beq+	4f	/* skip do_IRQ if no interrupts */
 
@@ -255,8 +255,9 @@ exception_marker:
 
 #define EXCEPTION_PROLOG_ISERIES_2 \
 	mfmsr	r10; \
-	ld	r11,PACALPPACA+LPPACASRR0(r13); \
-	ld	r12,PACALPPACA+LPPACASRR1(r13); \
+	ld	r12,PACALPPACAPTR(r13); \
+	ld	r11,LPPACASRR0(r12); \
+	ld	r12,LPPACASRR1(r12); \
 	ori	r10,r10,MSR_RI; \
 	mtmsrd	r10,1
 
@@ -635,7 +636,8 @@ data_access_slb_iSeries:
 	std	r12,PACA_EXSLB+EX_R12(r13)
 	mfspr	r10,SPRN_SPRG1
 	std	r10,PACA_EXSLB+EX_R13(r13)
-	ld	r12,PACALPPACA+LPPACASRR1(r13);
+	ld	r12,PACALPPACAPTR(r13)
+	ld	r12,LPPACASRR1(r12)
 	b	.slb_miss_realmode
 
 	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
@@ -645,7 +647,8 @@ instruction_access_slb_iSeries:
 	mtspr	SPRN_SPRG1,r13		/* save r13 */
 	mfspr	r13,SPRN_SPRG3		/* get paca address into r13 */
 	std	r3,PACA_EXSLB+EX_R3(r13)
-	ld	r3,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
+	ld	r3,PACALPPACAPTR(r13)
+	ld	r3,LPPACASRR0(r3)		/* get SRR0 value */
 	std	r9,PACA_EXSLB+EX_R9(r13)
 	mfcr	r9
 #ifdef __DISABLED__
@@ -657,7 +660,8 @@ instruction_access_slb_iSeries:
 	std	r12,PACA_EXSLB+EX_R12(r13)
 	mfspr	r10,SPRN_SPRG1
 	std	r10,PACA_EXSLB+EX_R13(r13)
-	ld	r12,PACALPPACA+LPPACASRR1(r13);
+	ld	r12,PACALPPACAPTR(r13)
+	ld	r12,LPPACASRR1(r12)
 	b	.slb_miss_realmode
 
 #ifdef __DISABLED__
@@ -746,7 +750,8 @@ iSeries_secondary_smp_loop:
 	.globl decrementer_iSeries_masked
 decrementer_iSeries_masked:
 	li	r11,1
-	stb	r11,PACALPPACA+LPPACADECRINT(r13)
+	ld	r12,PACALPPACAPTR(r13)
+	stb	r11,LPPACADECRINT(r12)
 	LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy)
 	lwz	r12,ADDROFF(tb_ticks_per_jiffy)(r12)
 	mtspr	SPRN_DEC,r12
@@ -755,8 +760,9 @@ decrementer_iSeries_masked:
 	.globl hardware_interrupt_iSeries_masked
 hardware_interrupt_iSeries_masked:
 	mtcrf	0x80,r9		/* Restore regs */
-	ld	r11,PACALPPACA+LPPACASRR0(r13)
-	ld	r12,PACALPPACA+LPPACASRR1(r13)
+	ld	r12,PACALPPACAPTR(r13)
+	ld	r11,LPPACASRR0(r12)
+	ld	r12,LPPACASRR1(r12)
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r12
 	ld	r9,PACA_EXGEN+EX_R9(r13)
@@ -995,7 +1001,8 @@ _GLOBAL(slb_miss_realmode)
 	ld	r3,PACA_EXSLB+EX_R3(r13)
 	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
 #ifdef CONFIG_PPC_ISERIES
-	ld	r11,PACALPPACA+LPPACASRR0(r13)	/* get SRR0 value */
+	ld	r11,PACALPPACAPTR(r13)
+	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
 #endif /* CONFIG_PPC_ISERIES */
 
 	mtlr	r10
@@ -238,15 +238,11 @@ void do_IRQ(struct pt_regs *regs)
 		irq_exit();
 
 #ifdef CONFIG_PPC_ISERIES
-	{
-		struct paca_struct *lpaca = get_paca();
-
-		if (lpaca->lppaca.int_dword.fields.decr_int) {
-			lpaca->lppaca.int_dword.fields.decr_int = 0;
-			/* Signal a fake decrementer interrupt */
-			timer_interrupt(regs);
-		}
+	if (get_lppaca()->int_dword.fields.decr_int) {
+		get_lppaca()->int_dword.fields.decr_int = 0;
+		/* Signal a fake decrementer interrupt */
+		timer_interrupt(regs);
 	}
 #endif
 }
 
@@ -55,15 +55,13 @@ static unsigned long get_purr(void)
 {
 	unsigned long sum_purr = 0;
 	int cpu;
-	struct paca_struct *lpaca;
 
 	for_each_cpu(cpu) {
-		lpaca = paca + cpu;
-		sum_purr += lpaca->lppaca.emulated_time_base;
+		sum_purr += lppaca[cpu].emulated_time_base;
 
 #ifdef PURR_DEBUG
 		printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n",
-			cpu, lpaca->lppaca.emulated_time_base);
+			cpu, lppaca[cpu].emulated_time_base);
 #endif
 	}
 	return sum_purr;
@@ -79,12 +77,11 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	unsigned long pool_id, lp_index;
 	int shared, entitled_capacity, max_entitled_capacity;
 	int processors, max_processors;
-	struct paca_struct *lpaca = get_paca();
 	unsigned long purr = get_purr();
 
 	seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
 
-	shared = (int)(lpaca->lppaca_ptr->shared_proc);
+	shared = (int)(get_lppaca()->shared_proc);
 	seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n",
 		   e2a(xItExtVpdPanel.mfgID[2]),
 		   e2a(xItExtVpdPanel.mfgID[3]),
@@ -402,7 +399,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 			   (h_resource >> 0 * 8) & 0xffff);
 
 		/* pool related entries are apropriate for shared configs */
-		if (paca[0].lppaca.shared_proc) {
+		if (lppaca[0].shared_proc) {
 
 			h_pic(&pool_idle_time, &pool_procs);
 
@@ -451,7 +448,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	seq_printf(m, "partition_potential_processors=%d\n",
 		   partition_potential_processors);
 
-	seq_printf(m, "shared_processor_mode=%d\n", paca[0].lppaca.shared_proc);
+	seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
 
 	return 0;
 }
@@ -25,6 +25,28 @@
  * field correctly */
 extern unsigned long __toc_start;
 
+/*
+ * iSeries structure which the hypervisor knows about - this structure
+ * should not cross a page boundary. The vpa_init/register_vpa call
+ * is now known to fail if the lppaca structure crosses a page
+ * boundary. The lppaca is also used on POWER5 pSeries boxes. The
+ * lppaca is 640 bytes long, and cannot readily change since the
+ * hypervisor knows its layout, so a 1kB alignment will suffice to
+ * ensure that it doesn't cross a page boundary.
+ */
+struct lppaca lppaca[] = {
+	[0 ... (NR_CPUS-1)] = {
+		.desc = 0xd397d781,	/* "LpPa" */
+		.size = sizeof(struct lppaca),
+		.dyn_proc_status = 2,
+		.decr_val = 0x00ff0000,
+		.fpregs_in_use = 1,
+		.end_of_quantum = 0xfffffffffffffffful,
+		.slb_count = 64,
+		.vmxregs_in_use = 0,
+	},
+};
+
 /* The Paca is an array with one entry per processor. Each contains an
  * lppaca, which contains the information shared between the
  * hypervisor and Linux.
@@ -35,27 +57,17 @@ extern unsigned long __toc_start;
 * processor (not thread).
 */
 #define PACA_INIT_COMMON(number, start, asrr, asrv) \
+	.lppaca_ptr = &lppaca[number], \
 	.lock_token = 0x8000, \
 	.paca_index = (number),		/* Paca Index */ \
 	.kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
 	.stab_real = (asrr),		/* Real pointer to segment table */ \
 	.stab_addr = (asrv),		/* Virt pointer to segment table */ \
 	.cpu_start = (start),		/* Processor start */ \
-	.hw_cpu_id = 0xffff, \
-	.lppaca = { \
-		.desc = 0xd397d781,	/* "LpPa" */ \
-		.size = sizeof(struct lppaca), \
-		.dyn_proc_status = 2, \
-		.decr_val = 0x00ff0000, \
-		.fpregs_in_use = 1, \
-		.end_of_quantum = 0xfffffffffffffffful, \
-		.slb_count = 64, \
-		.vmxregs_in_use = 0, \
-	}, \
+	.hw_cpu_id = 0xffff,
 
 #ifdef CONFIG_PPC_ISERIES
 #define PACA_INIT_ISERIES(number) \
-	.lppaca_ptr = &paca[number].lppaca, \
 	.reg_save_ptr = &iseries_reg_save[number],
 
 #define PACA_INIT(number) \
@@ -431,7 +431,7 @@ void timer_interrupt(struct pt_regs * regs)
 		profile_tick(CPU_PROFILING, regs);
 
 #ifdef CONFIG_PPC_ISERIES
-	get_paca()->lppaca.int_dword.fields.decr_int = 0;
+	get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
 	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
@@ -28,15 +28,13 @@
 void __spin_yield(raw_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
-	struct paca_struct *holder_paca;
 
 	lock_value = lock->slock;
 	if (lock_value == 0)
 		return;
 	holder_cpu = lock_value & 0xffff;
 	BUG_ON(holder_cpu >= NR_CPUS);
-	holder_paca = &paca[holder_cpu];
-	yield_count = holder_paca->lppaca.yield_count;
+	yield_count = lppaca[holder_cpu].yield_count;
 	if ((yield_count & 1) == 0)
 		return;		/* virtual cpu is currently running */
 	rmb();
@@ -60,15 +58,13 @@ void __rw_yield(raw_rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
-	struct paca_struct *holder_paca;
 
 	lock_value = rw->lock;
 	if (lock_value >= 0)
 		return;		/* no write lock at present */
 	holder_cpu = lock_value & 0xffff;
 	BUG_ON(holder_cpu >= NR_CPUS);
-	holder_paca = &paca[holder_cpu];
-	yield_count = holder_paca->lppaca.yield_count;
+	yield_count = lppaca[holder_cpu].yield_count;
 	if ((yield_count & 1) == 0)
 		return;		/* virtual cpu is currently running */
 	rmb();
@@ -334,14 +334,12 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus,
 */
 int iSeries_get_irq(struct pt_regs *regs)
 {
-	struct paca_struct *lpaca;
 	/* -2 means ignore this interrupt */
 	int irq = -2;
 
-	lpaca = get_paca();
 #ifdef CONFIG_SMP
-	if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
-		lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
+	if (get_lppaca()->int_dword.fields.ipi_cnt) {
+		get_lppaca()->int_dword.fields.ipi_cnt = 0;
 		iSeries_smp_message_recv(regs);
 	}
 #endif /* CONFIG_SMP */
@@ -44,7 +44,8 @@ _GLOBAL(local_irq_restore)
 	/* Check pending interrupts */
 	/* A decrementer, IPI or PMC interrupt may have occurred
 	 * while we were in the hypervisor (which enables) */
-	ld	r4,PACALPPACA+LPPACAANYINT(r13)
+	ld	r4,PACALPPACAPTR(r13)
+	ld	r4,LPPACAANYINT(r4)
 	cmpdi	r4,0
 	beqlr
 
@@ -538,7 +538,7 @@ static unsigned long __init build_iSeries_Memory_Map(void)
 */
 static void __init iSeries_setup_arch(void)
 {
-	if (get_paca()->lppaca.shared_proc) {
+	if (get_lppaca()->shared_proc) {
 		ppc_md.idle_loop = iseries_shared_idle;
 		printk(KERN_INFO "Using shared processor idle loop\n");
 	} else {
@@ -647,7 +647,7 @@ static void yield_shared_processor(void)
 	 * The decrementer stops during the yield. Force a fake decrementer
 	 * here and let the timer_interrupt code sort out the actual time.
 	 */
-	get_paca()->lppaca.int_dword.fields.decr_int = 1;
+	get_lppaca()->int_dword.fields.decr_int = 1;
 	process_iSeries_events();
 }
 
@@ -883,7 +883,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
 	pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
 	for (i = 0; i < NR_CPUS; i++) {
-		if (paca[i].lppaca.dyn_proc_status >= 2)
+		if (lppaca[i].dyn_proc_status >= 2)
 			continue;
 
 		snprintf(p, 32 - (p - buf), "@%d", i);
@@ -891,7 +891,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
 
 		dt_prop_str(dt, "device_type", "cpu");
 
-		index = paca[i].lppaca.dyn_hv_phys_proc_index;
+		index = lppaca[i].dyn_hv_phys_proc_index;
 		d = &xIoHriProcessorVpd[index];
 
 		dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
@@ -91,7 +91,7 @@ static void smp_iSeries_kick_cpu(int nr)
 	BUG_ON((nr < 0) || (nr >= NR_CPUS));
 
 	/* Verify that our partition has a processor nr */
-	if (paca[nr].lppaca.dyn_proc_status >= 2)
+	if (lppaca[nr].dyn_proc_status >= 2)
 		return;
 
 	/* The processor is currently spinning, waiting
@@ -254,11 +254,11 @@ void __init find_udbg_vterm(void)
 void vpa_init(int cpu)
 {
 	int hwcpu = get_hard_smp_processor_id(cpu);
-	unsigned long vpa = __pa(&paca[cpu].lppaca);
+	unsigned long vpa = __pa(&lppaca[cpu]);
 	long ret;
 
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		paca[cpu].lppaca.vmxregs_in_use = 1;
+		lppaca[cpu].vmxregs_in_use = 1;
 
 	ret = register_vpa(hwcpu, vpa);
 
@@ -190,7 +190,7 @@ static void pseries_lpar_enable_pmcs(void)
 
 	/* instruct hypervisor to maintain PMCs */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR))
-		get_paca()->lppaca.pmcregs_in_use = 1;
+		get_lppaca()->pmcregs_in_use = 1;
 }
 
 static void __init pSeries_setup_arch(void)
@@ -234,7 +234,7 @@ static void __init pSeries_setup_arch(void)
 	/* Choose an idle loop */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
 		vpa_init(boot_cpuid);
-		if (get_paca()->lppaca.shared_proc) {
+		if (get_lppaca()->shared_proc) {
 			printk(KERN_INFO "Using shared processor idle loop\n");
 			ppc_md.idle_loop = pseries_shared_idle;
 		} else {
@@ -444,10 +444,10 @@ DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
 
 static inline void dedicated_idle_sleep(unsigned int cpu)
 {
-	struct paca_struct *ppaca = &paca[cpu ^ 1];
+	struct lppaca *plppaca = &lppaca[cpu ^ 1];
 
 	/* Only sleep if the other thread is not idle */
-	if (!(ppaca->lppaca.idle)) {
+	if (!(plppaca->idle)) {
 		local_irq_disable();
 
 		/*
@@ -480,7 +480,6 @@ static inline void dedicated_idle_sleep(unsigned int cpu)
 
 static void pseries_dedicated_idle(void)
 {
-	struct paca_struct *lpaca = get_paca();
 	unsigned int cpu = smp_processor_id();
 	unsigned long start_snooze;
 	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
@@ -491,7 +490,7 @@ static void pseries_dedicated_idle(void)
 		 * Indicate to the HV that we are idle. Now would be
 		 * a good time to find other work to dispatch.
 		 */
-		lpaca->lppaca.idle = 1;
+		get_lppaca()->idle = 1;
 
 		if (!need_resched()) {
 			start_snooze = get_tb() +
@@ -518,7 +517,7 @@ static void pseries_dedicated_idle(void)
 			HMT_medium();
 		}
 
-		lpaca->lppaca.idle = 0;
+		get_lppaca()->idle = 0;
 		ppc64_runlatch_on();
 
 		preempt_enable_no_resched();
@@ -532,7 +531,6 @@ static void pseries_dedicated_idle(void)
 
 static void pseries_shared_idle(void)
 {
-	struct paca_struct *lpaca = get_paca();
 	unsigned int cpu = smp_processor_id();
 
 	while (1) {
@@ -540,7 +538,7 @@ static void pseries_shared_idle(void)
 		 * Indicate to the HV that we are idle. Now would be
 		 * a good time to find other work to dispatch.
 		 */
-		lpaca->lppaca.idle = 1;
+		get_lppaca()->idle = 1;
 
 		while (!need_resched() && !cpu_is_offline(cpu)) {
 			local_irq_disable();
@@ -564,7 +562,7 @@ static void pseries_shared_idle(void)
 			HMT_medium();
 		}
 
-		lpaca->lppaca.idle = 0;
+		get_lppaca()->idle = 0;
 		ppc64_runlatch_on();
 
 		preempt_enable_no_resched();
@@ -588,7 +586,7 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
 {
 	/* Don't risk a hypervisor call if we're crashing */
 	if (!crash_shutdown) {
-		unsigned long vpa = __pa(&get_paca()->lppaca);
+		unsigned long vpa = __pa(get_lppaca());
 
 		if (unregister_vpa(hard_smp_processor_id(), vpa)) {
 			printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
@@ -29,7 +29,9 @@
 //----------------------------------------------------------------------------
 #include <asm/types.h>
 
-struct lppaca {
+/* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
+ * alignment is sufficient to prevent this */
+struct __attribute__((__aligned__(0x400))) lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,5 +131,7 @@ struct lppaca {
 	u8	pmc_save_area[256];	// PMC interrupt Area		x00-xFF
 };
 
+extern struct lppaca lppaca[];
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
@@ -23,6 +23,7 @@
 
 register struct paca_struct *local_paca asm("r13");
 #define get_paca()	local_paca
+#define get_lppaca()	(get_paca()->lppaca_ptr)
 
 struct task_struct;
 
@@ -95,19 +96,6 @@ struct paca_struct {
 	u64 saved_r1;			/* r1 save for RTAS calls */
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 	u8 proc_enabled;		/* irq soft-enable flag */
-
-	/*
-	 * iSeries structure which the hypervisor knows about -
-	 * this structure should not cross a page boundary.
-	 * The vpa_init/register_vpa call is now known to fail if the
-	 * lppaca structure crosses a page boundary.
-	 * The lppaca is also used on POWER5 pSeries boxes.
-	 * The lppaca is 640 bytes long, and cannot readily change
-	 * since the hypervisor knows its layout, so a 1kB
-	 * alignment will suffice to ensure that it doesn't
-	 * cross a page boundary.
-	 */
-	struct lppaca lppaca __attribute__((__aligned__(0x400)));
 };
 
 extern struct paca_struct paca[];
@@ -80,7 +80,7 @@ static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
 extern void __spin_yield(raw_spinlock_t *lock);
 extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
@@ -175,11 +175,10 @@ static inline void set_dec(int val)
 	set_dec_cpu6(val);
 #else
 #ifdef CONFIG_PPC_ISERIES
-	struct paca_struct *lpaca = get_paca();
 	int cur_dec;
 
-	if (lpaca->lppaca.shared_proc) {
-		lpaca->lppaca.virtual_decr = val;
+	if (get_lppaca()->shared_proc) {
+		get_lppaca()->virtual_decr = val;
 		cur_dec = get_dec();
 		if (cur_dec > val)
 			HvCall_setVirtualDecr();