/*
 *  Copyright (C) 1995-2003 Russell King
 *                2001-2002 Keith Owens
 *
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed to extract
 * and format the required data.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/procinfo.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
/*
 * Make sure that the compiler and target are compatible.
 */
#if defined(__APCS_26__)
#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
#endif
/*
 * GCC 3.0, 3.1: general bad code generation.
 * GCC 3.2.0: incorrect function argument offset calculation.
 * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
 *            (http://gcc.gnu.org/PR8896) and incorrect structure
 *	      initialisation in fs/jffs2/erase.c
 */
#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
#error Your compiler is too buggy; it is known to miscompile kernels.
#error    Known good compilers: 3.3
#endif
int main(void)
|
|
|
|
{
|
|
|
|
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
|
2010-06-08 09:50:33 +08:00
|
|
|
#ifdef CONFIG_CC_STACKPROTECTOR
|
|
|
|
DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
BLANK();
|
|
|
|
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
|
|
|
|
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
|
|
|
|
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
|
|
|
|
DEFINE(TI_TASK, offsetof(struct thread_info, task));
|
|
|
|
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
|
|
|
|
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
|
|
|
|
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
|
|
|
|
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
|
|
|
|
DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp));
|
|
|
|
DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
|
|
|
|
DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
|
|
|
|
DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
|
ARM: vfp: fix a hole in VFP thread migration
Fix a hole in the VFP thread migration. Lets define two threads.
Thread 1, we'll call 'interesting_thread' which is a thread which is
running on CPU0, using VFP (so vfp_current_hw_state[0] =
&interesting_thread->vfpstate) and gets migrated off to CPU1, where
it continues execution of VFP instructions.
Thread 2, we'll call 'new_cpu0_thread' which is the thread which takes
over on CPU0. This has also been using VFP, and last used VFP on CPU0,
but doesn't use it again.
The following code will be executed twice:
cpu = thread->cpu;
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
vfp_current_hw_state[cpu]->hard.cpu = cpu;
}
/*
* Thread migration, just force the reloading of the
* state on the new CPU in case the VFP registers
* contain stale data.
*/
if (thread->vfpstate.hard.cpu != cpu)
vfp_current_hw_state[cpu] = NULL;
The first execution will be on CPU0 to switch away from 'interesting_thread'.
interesting_thread->cpu will be 0.
So, vfp_current_hw_state[0] points at interesting_thread->vfpstate.
The hardware state will be saved, along with the CPU number (0) that
it was executing on.
'thread' will be 'new_cpu0_thread' with new_cpu0_thread->cpu = 0.
Also, because it was executing on CPU0, new_cpu0_thread->vfpstate.hard.cpu = 0,
and so the thread migration check is not triggered.
This means that vfp_current_hw_state[0] remains pointing at interesting_thread.
The second execution will be on CPU1 to switch _to_ 'interesting_thread'.
So, 'thread' will be 'interesting_thread' and interesting_thread->cpu now
will be 1. The previous thread executing on CPU1 is not relevant to this
so we shall ignore that.
We get to the thread migration check. Here, we discover that
interesting_thread->vfpstate.hard.cpu = 0, yet interesting_thread->cpu is
now 1, indicating thread migration. We set vfp_current_hw_state[1] to
NULL.
So, at this point vfp_current_hw_state[] contains the following:
[0] = &interesting_thread->vfpstate
[1] = NULL
Our interesting thread now executes a VFP instruction, takes a fault
which loads the state into the VFP hardware. Now, through the assembly
we now have:
[0] = &interesting_thread->vfpstate
[1] = &interesting_thread->vfpstate
CPU1 stops due to ptrace (and so saves its VFP state) using the thread
switch code above), and CPU0 calls vfp_sync_hwstate().
if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
BANG, we corrupt interesting_thread's VFP state by overwriting the
more up-to-date state saved by CPU1 with the old VFP state from CPU0.
Fix this by ensuring that we have sane semantics for the various state
describing variables:
1. vfp_current_hw_state[] points to the current owner of the context
information stored in each CPUs hardware, or NULL if that state
information is invalid.
2. thread->vfpstate.hard.cpu always contains the most recent CPU number
which the state was loaded into or NR_CPUS if no CPU owns the state.
So, for a particular CPU to be a valid owner of the VFP state for a
particular thread t, two things must be true:
vfp_current_hw_state[cpu] == &t->vfpstate && t->vfpstate.hard.cpu == cpu.
and that is valid from the moment a CPU loads the saved VFP context
into the hardware. This gives clear and consistent semantics to
interpreting these variables.
This patch also fixes thread copying, ensuring that t->vfpstate.hard.cpu
is invalidated, otherwise CPU0 may believe it was the last owner. The
hole can happen thus:
- thread1 runs on CPU2 using VFP, migrates to CPU3, exits and thread_info
freed.
- New thread allocated from a previously running thread on CPU2, reusing
memory for thread1 and copying vfp.hard.cpu.
At this point, the following are true:
new_thread1->vfpstate.hard.cpu == 2
&new_thread1->vfpstate == vfp_current_hw_state[2]
Lastly, this also addresses thread flushing in a similar way to thread
copying. Hole is:
- thread runs on CPU0, using VFP, migrates to CPU1 but does not use VFP.
- thread calls execve(), so thread flush happens, leaving
vfp_current_hw_state[0] intact. This vfpstate is memset to 0 causing
thread->vfpstate.hard.cpu = 0.
- thread migrates back to CPU0 before using VFP.
At this point, the following are true:
thread->vfpstate.hard.cpu == 0
&thread->vfpstate == vfp_current_hw_state[0]
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-07-09 23:09:43 +08:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
|
|
|
|
#endif
|
2008-04-19 05:43:06 +08:00
|
|
|
#ifdef CONFIG_ARM_THUMBEE
|
|
|
|
DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
|
|
|
|
#endif
|
2006-03-13 06:36:06 +08:00
|
|
|
#ifdef CONFIG_IWMMXT
|
|
|
|
DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt));
|
2006-06-28 06:03:03 +08:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_CRUNCH
|
|
|
|
DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate));
|
2006-03-13 06:36:06 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
BLANK();
|
2005-04-26 22:18:59 +08:00
|
|
|
DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
|
|
|
|
DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
|
|
|
|
DEFINE(S_R2, offsetof(struct pt_regs, ARM_r2));
|
|
|
|
DEFINE(S_R3, offsetof(struct pt_regs, ARM_r3));
|
|
|
|
DEFINE(S_R4, offsetof(struct pt_regs, ARM_r4));
|
|
|
|
DEFINE(S_R5, offsetof(struct pt_regs, ARM_r5));
|
|
|
|
DEFINE(S_R6, offsetof(struct pt_regs, ARM_r6));
|
|
|
|
DEFINE(S_R7, offsetof(struct pt_regs, ARM_r7));
|
|
|
|
DEFINE(S_R8, offsetof(struct pt_regs, ARM_r8));
|
|
|
|
DEFINE(S_R9, offsetof(struct pt_regs, ARM_r9));
|
|
|
|
DEFINE(S_R10, offsetof(struct pt_regs, ARM_r10));
|
|
|
|
DEFINE(S_FP, offsetof(struct pt_regs, ARM_fp));
|
|
|
|
DEFINE(S_IP, offsetof(struct pt_regs, ARM_ip));
|
|
|
|
DEFINE(S_SP, offsetof(struct pt_regs, ARM_sp));
|
|
|
|
DEFINE(S_LR, offsetof(struct pt_regs, ARM_lr));
|
|
|
|
DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc));
|
|
|
|
DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr));
|
|
|
|
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
|
|
|
|
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
|
|
|
|
BLANK();
|
2011-09-30 21:43:12 +08:00
|
|
|
#ifdef CONFIG_CACHE_L2X0
|
|
|
|
DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base));
|
|
|
|
DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl));
|
|
|
|
DEFINE(L2X0_R_TAG_LATENCY, offsetof(struct l2x0_regs, tag_latency));
|
|
|
|
DEFINE(L2X0_R_DATA_LATENCY, offsetof(struct l2x0_regs, data_latency));
|
|
|
|
DEFINE(L2X0_R_FILTER_START, offsetof(struct l2x0_regs, filter_start));
|
|
|
|
DEFINE(L2X0_R_FILTER_END, offsetof(struct l2x0_regs, filter_end));
|
|
|
|
DEFINE(L2X0_R_PREFETCH_CTRL, offsetof(struct l2x0_regs, prefetch_ctrl));
|
|
|
|
DEFINE(L2X0_R_PWR_CTRL, offsetof(struct l2x0_regs, pwr_ctrl));
|
|
|
|
BLANK();
|
|
|
|
#endif
|
2007-05-17 17:19:23 +08:00
|
|
|
#ifdef CONFIG_CPU_HAS_ASID
|
2005-04-17 06:20:36 +08:00
|
|
|
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
|
|
|
|
BLANK();
|
|
|
|
#endif
|
|
|
|
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
|
|
|
|
DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
|
|
|
|
BLANK();
|
|
|
|
DEFINE(VM_EXEC, VM_EXEC);
|
|
|
|
BLANK();
|
|
|
|
DEFINE(PAGE_SZ, PAGE_SIZE);
|
|
|
|
BLANK();
|
|
|
|
DEFINE(SYS_ERROR0, 0x9f0000);
|
|
|
|
BLANK();
|
|
|
|
DEFINE(SIZEOF_MACHINE_DESC, sizeof(struct machine_desc));
|
2006-05-05 22:11:14 +08:00
|
|
|
DEFINE(MACHINFO_TYPE, offsetof(struct machine_desc, nr));
|
|
|
|
DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name));
|
2006-05-11 01:11:05 +08:00
|
|
|
BLANK();
|
|
|
|
DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list));
|
2006-05-05 22:11:14 +08:00
|
|
|
DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush));
|
2006-06-30 01:24:21 +08:00
|
|
|
DEFINE(PROCINFO_MM_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
|
|
|
|
DEFINE(PROCINFO_IO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_io_mmu_flags));
|
2008-04-19 05:43:07 +08:00
|
|
|
BLANK();
|
|
|
|
#ifdef MULTI_DABORT
|
|
|
|
DEFINE(PROCESSOR_DABT_FUNC, offsetof(struct processor, _data_abort));
|
|
|
|
#endif
|
|
|
|
#ifdef MULTI_PABORT
|
|
|
|
DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
|
2011-02-06 23:48:39 +08:00
|
|
|
#endif
|
|
|
|
#ifdef MULTI_CPU
|
|
|
|
DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size));
|
|
|
|
DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend));
|
|
|
|
DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume));
|
|
|
|
#endif
|
|
|
|
#ifdef MULTI_CACHE
|
|
|
|
DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all));
|
2008-04-19 05:43:07 +08:00
|
|
|
#endif
|
2009-11-27 00:19:58 +08:00
|
|
|
BLANK();
|
|
|
|
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
|
|
|
|
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
|
|
|
|
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|