commit b079115937
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

A set of overlapping changes in macvlan and the rocker driver,
nothing serious.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -41,9 +41,9 @@ Required properties:
 Optional properties:

 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.

-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"

 - reg: an additional register set is needed, for the GPIO Blink
 Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
 };

 gpio1: gpio@18140 {
-compatible = "marvell,armada-370-xp-gpio";
+compatible = "marvell,armada-370-gpio";
 reg = <0x18140 0x40>, <0x181c8 0x08>;
 reg-names = "gpio", "pwm";
 ngpios = <17>;
@@ -31,7 +31,7 @@ Example:
 compatible = "st,stm32-timers";
 reg = <0x40010000 0x400>;
 clocks = <&rcc 0 160>;
-clock-names = "clk_int";
+clock-names = "int";

 pwm {
 compatible = "st,stm32-pwm";
@@ -2964,7 +2964,7 @@ F: sound/pci/oxygen/

 C6X ARCHITECTURE
 M: Mark Salter <msalter@redhat.com>
-M: Aurelien Jacquiot <a-jacquiot@ti.com>
+M: Aurelien Jacquiot <jacquiot.aurelien@gmail.com>
 L: linux-c6x-dev@linux-c6x.org
 W: http://www.linux-c6x.org/wiki/index.php/Main_Page
 S: Maintained
Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote

 # *DOCUMENTATION*
@@ -1437,7 +1437,7 @@ help:
 @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
 @echo ' make V=2 [targets] 2 => give reason for rebuild of target'
 @echo ' make O=dir [targets] Locate all output files in "dir", including .config'
-@echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)'
+@echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)'
 @echo ' make C=2 [targets] Force check of all c source with $$CHECK'
 @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
 @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where'
@@ -86,8 +86,6 @@ struct task_struct;
 #define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
 #define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)

-#define thread_saved_pc(tsk) TSK_K_BLINK(tsk)
-
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
 unsigned long usp);

@@ -1416,6 +1416,7 @@ choice
 config VMSPLIT_3G
 bool "3G/1G user/kernel split"
 config VMSPLIT_3G_OPT
+depends on !ARM_LPAE
 bool "3G/1G user/kernel split (for full 1G low memory)"
 config VMSPLIT_2G
 bool "2G/2G user/kernel split"
@@ -17,7 +17,8 @@
 @ there.
 .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
 #else
-W(mov) r0, r0
+AR_CLASS( mov r0, r0 )
+M_CLASS( nop.w )
 #endif
 .endm

@@ -315,7 +315,7 @@ static void __init cacheid_init(void)
 if (arch >= CPU_ARCH_ARMv6) {
 unsigned int cachetype = read_cpuid_cachetype();

-if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
+if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
 cacheid = 0;
 } else if ((cachetype & (7 << 29)) == 4 << 29) {
 /* ARMv7 register format */
@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
 /* tkr_mono.cycle_last == tkr_raw.cycle_last */
 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
 vdso_data->raw_time_sec = tk->raw_time.tv_sec;
-vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
+vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
+                            tk->tkr_raw.shift) +
+                           tk->tkr_raw.xtime_nsec;
 vdso_data->xtime_clock_sec = tk->xtime_sec;
 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
-/* tkr_raw.xtime_nsec == 0 */
 vdso_data->cs_mono_mult = tk->tkr_mono.mult;
 vdso_data->cs_raw_mult = tk->tkr_raw.mult;
 /* tkr_mono.shift == tkr_raw.shift */
@@ -256,7 +256,6 @@ monotonic_raw:
 seqcnt_check fail=monotonic_raw

 /* All computations are done with left-shifted nsecs. */
-lsl x14, x14, x12
 get_nsec_per_sec res=x9
 lsl x9, x9, x12

@@ -75,11 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }

-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (tsk->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);

 #define KSTK_EIP(tsk) \
@@ -95,11 +95,6 @@ static inline void release_thread(struct task_struct *dead_task)
 #define copy_segments(tsk, mm) do { } while (0)
 #define release_segments(mm) do { } while (0)

-/*
- * saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
-
 /*
 * saved kernel SP and DP of a blocked thread.
 */
@@ -69,14 +69,6 @@ void hard_reset_now (void)
 while(1) /* waiting for RETRIBUTION! */ ;
 }

-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-return task_pt_regs(t)->irp;
-}
-
 /* setup the child's kernel stack with a pt_regs and switch_stack on it.
 * it will be un-nested during _resume and _ret_from_sys_call when the
 * new thread is scheduled.
@@ -84,14 +84,6 @@ hard_reset_now(void)
 ; /* Wait for reset. */
 }

-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-return task_pt_regs(t)->erp;
-}
-
 /*
 * Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
 * It will be unnested during _resume and _ret_from_sys_call when the new thread
@@ -52,8 +52,6 @@ unsigned long get_wchan(struct task_struct *p);

 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)

-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 /* Free all resources held by a thread. */
 static inline void release_thread(struct task_struct *dead_task)
 {
@@ -96,11 +96,6 @@ extern asmlinkage void *restore_user_regs(const struct user_context *target, ...
 #define release_segments(mm) do { } while (0)
 #define forget_segments() do { } while (0)

-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);

 #define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
@@ -198,15 +198,6 @@ unsigned long get_wchan(struct task_struct *p)
 return 0;
 }

-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-/* Check whether the thread is blocked in resume() */
-if (in_sched_functions(tsk->thread.pc))
-return ((unsigned long *)tsk->thread.fp)[2];
-else
-return tsk->thread.pc;
-}
-
 int elf_check_arch(const struct elf32_hdr *hdr)
 {
 unsigned long hsr0 = __get_HSR(0);
@@ -110,10 +110,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }

-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
 unsigned long get_wchan(struct task_struct *p);

 #define KSTK_EIP(tsk) \
@@ -129,11 +129,6 @@ int copy_thread(unsigned long clone_flags,
 return 0;
 }

-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
 unsigned long get_wchan(struct task_struct *p)
 {
 unsigned long fp, pc;
@@ -33,9 +33,6 @@
 /* task_struct, defined elsewhere, is the "process descriptor" */
 struct task_struct;

-/* this is defined in arch/process.c */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

 /*
@@ -60,14 +60,6 @@ void arch_cpu_idle(void)
 local_irq_enable();
 }

-/*
- * Return saved PC of a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-return 0;
-}
-
 /*
 * Copy architecture-specific thread state
 */
@@ -601,23 +601,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
 *unat = (*unat & ~mask) | (nat << bit);
 }

-/*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
-struct unw_frame_info info;
-unsigned long ip;
-
-unw_init_from_blocked_task(&info, t);
-if (unw_unwind(&info) < 0)
-return 0;
-unw_get_ip(&info, &ip);
-return ip;
-}
-
 /*
 * Get the current instruction/program counter value.
 */
@@ -122,8 +122,6 @@ extern void release_thread(struct task_struct *);
 extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
 extern void release_segments(struct mm_struct * mm);

-extern unsigned long thread_saved_pc(struct task_struct *);
-
 /* Copy and release all segment info associated with a VM */
 #define copy_segments(p, mm) do { } while (0)
 #define release_segments(mm) do { } while (0)
@@ -39,14 +39,6 @@

 #include <linux/err.h>

-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-return tsk->thread.lr;
-}
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);

@@ -130,8 +130,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }

-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);

 #define KSTK_EIP(tsk) \
@@ -40,20 +40,6 @@
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);

-
-/*
- * Return saved PC from a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
-/* Check whether the thread is blocked in resume() */
-if (in_sched_functions(sw->retpc))
-return ((unsigned long *)sw->a6)[1];
-else
-return sw->retpc;
-}
-
 void arch_cpu_idle(void)
 {
 #if defined(MACH_ATARI_ONLY)
@@ -69,8 +69,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }

-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 extern unsigned long get_wchan(struct task_struct *p);

 # define KSTK_EIP(tsk) (0)
@@ -121,10 +119,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }

-/* Return saved (kernel) PC of a blocked thread. */
-# define thread_saved_pc(tsk) \
-((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
-
 unsigned long get_wchan(struct task_struct *p);

 /* The size allocated for kernel stacks. This _must_ be a power of two! */
@@ -119,23 +119,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 return 0;
 }

-#ifndef CONFIG_MMU
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-struct cpu_context *ctx =
-&(((struct thread_info *)(tsk->stack))->cpu_context);
-
-/* Check whether the thread is blocked in resume() */
-if (in_sched_functions(ctx->r15))
-return (unsigned long)ctx->r15;
-else
-return ctx->r14;
-}
-#endif
-
 unsigned long get_wchan(struct task_struct *p)
 {
 /* TBD (used by procfs) */
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
 bool user, bool kernel)
 {
-int idx_user, idx_kernel;
+/*
+ * Initialize idx_user and idx_kernel to workaround bogus
+ * maybe-initialized warning when using GCC 6.
+ */
+int idx_user = 0, idx_kernel = 0;
 unsigned long flags, old_entryhi;

 local_irq_save(flags);
@@ -132,11 +132,6 @@ static inline void start_thread(struct pt_regs *regs,
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);

-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);

 #define task_pt_regs(task) ((task)->thread.uregs)
@@ -39,14 +39,6 @@
 #include <asm/gdb-stub.h>
 #include "internal.h"

-/*
- * return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-return ((unsigned long *) tsk->thread.sp)[3];
-}
-
 /*
 * power off function, if any
 */
@@ -75,9 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }

-/* Return saved PC of a blocked thread. */
-#define thread_saved_pc(tsk) ((tsk)->thread.kregs->ea)
-
 extern unsigned long get_wchan(struct task_struct *p);

 #define task_pt_regs(p) \
@@ -84,11 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);

-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 #define init_stack (init_thread_union.stack)

 #define cpu_relax() barrier()
@@ -110,11 +110,6 @@ void show_regs(struct pt_regs *regs)
 show_registers(regs);
 }

-unsigned long thread_saved_pc(struct task_struct *t)
-{
-return (unsigned long)user_regs(t->stack)->pc;
-}
-
 void release_thread(struct task_struct *dead_task)
 {
 }
@@ -163,12 +163,7 @@ struct thread_struct {
 .flags = 0 \
 }

-/*
- * Return saved PC of a blocked thread. This is used by ps mostly.
- */
-
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *t);
 void show_trace(struct task_struct *task, unsigned long *stack);

 /*
@@ -239,11 +239,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 return 0;
 }

-unsigned long thread_saved_pc(struct task_struct *t)
-{
-return t->thread.regs.kpc;
-}
-
 unsigned long
 get_wchan(struct task_struct *p)
 {
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 struct kprobe_ctlblk *kcb);
@@ -378,12 +378,6 @@ struct thread_struct {
 }
 #endif

-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk) \
-((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
 #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)

 unsigned long get_wchan(struct task_struct *p);
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
 .balign IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-andis. r0,r4,0xa410 /* weird error? */
+andis. r0,r4,0xa450 /* weird error? */
 bne- handle_page_fault /* if not, try to insert a HPTE */
-andis. r0,r4,DSISR_DABRMATCH@h
-bne- handle_dabr_fault
 CURRENT_THREAD_INFO(r11, r1)
 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:

 /* Error */
 blt- 13f
+
+/* Reload DSISR into r4 for the DABR check below */
+ld r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */

 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11: ld r4,_DAR(r1)
+11: andis. r0,r4,DSISR_DABRMATCH@h
+bne- handle_dabr_fault
+ld r4,_DAR(r1)
 ld r5,_DSISR(r1)
 addi r3,r1,STACK_FRAME_OVERHEAD
 bl do_page_fault
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

+int is_current_kprobe_addr(unsigned long addr)
+{
+struct kprobe *p = kprobe_running();
+return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 return (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif

+/*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+pause_graph_tracing();
+
 return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 * saved regs...
 */
 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+/* It's OK to start function graph tracing again */
+unpause_graph_tracing();
 preempt_enable_no_resched();
 return 1;
 }
@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
 }
 #endif

+/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+ti->task = NULL;
+ti->cpu = cpu;
+ti->preempt_count = 0;
+ti->local_flags = 0;
+ti->flags = 0;
+klp_init_thread_info(ti);
+}
+
 /*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
 * Since we use these as temporary stacks during secondary CPU
 * bringup, we need to get at them in real mode. This means they
 * must also be within the RMO region.
+ *
+ * The IRQ stacks allocated elsewhere in this file are zeroed and
+ * initialized in kernel/irq.c. These are initialized here in order
+ * to have emergency stacks available as early as possible.
 */
 limit = min(safe_stack_limit(), ppc64_rma_size);

 for_each_possible_cpu(i) {
 struct thread_info *ti;
 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
 paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

 #ifdef CONFIG_PPC_BOOK3S_64
 /* emergency stack for NMI exception handling. */
 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
 paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;

 /* emergency stack for machine check exception handling. */
 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
 paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 }
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
 stdu r1,-SWITCH_FRAME_SIZE(r1)

 /* Save all gprs to pt_regs */
-SAVE_8GPRS(0,r1)
-SAVE_8GPRS(8,r1)
-SAVE_8GPRS(16,r1)
-SAVE_8GPRS(24,r1)
+SAVE_GPR(0, r1)
+SAVE_10GPRS(2, r1)
+SAVE_10GPRS(12, r1)
+SAVE_10GPRS(22, r1)
+
+/* Save previous stack pointer (r1) */
+addi r8, r1, SWITCH_FRAME_SIZE
+std r8, GPR1(r1)

 /* Load special regs for save below */
 mfmsr r8
@@ -95,18 +99,44 @@ ftrace_call:
 bl ftrace_stub
 nop

-/* Load ctr with the possibly modified NIP */
-ld r3, _NIP(r1)
-mtctr r3
+/* Load the possibly modified NIP */
+ld r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-cmpd r14,r3 /* has NIP been altered? */
+cmpd r14, r15 /* has NIP been altered? */
 #endif

+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+/* NIP has not been altered, skip over further checks */
+beq 1f
+
+/* Check if there is an active kprobe on us */
+subi r3, r14, 4
+bl is_current_kprobe_addr
+nop
+
+/*
+ * If r3 == 1, then this is a kprobe/jprobe.
+ * else, this is livepatched function.
+ *
+ * The conditional branch for livepatch_handler below will use the
+ * result of this comparison. For kprobe/jprobe, we just need to branch to
+ * the new NIP, not call livepatch_handler. The branch below is bne, so we
+ * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+ * CR0[EQ] = (r3 == 1).
+ */
+cmpdi r3, 1
+1:
+#endif
+
+/* Load CTR with the possibly modified NIP */
+mtctr r15
+
 /* Restore gprs */
-REST_8GPRS(0,r1)
-REST_8GPRS(8,r1)
-REST_8GPRS(16,r1)
-REST_8GPRS(24,r1)
+REST_GPR(0,r1)
+REST_10GPRS(2,r1)
+REST_10GPRS(12,r1)
+REST_10GPRS(22,r1)

 /* Restore possibly modified LR */
 ld r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
 addi r1, r1, SWITCH_FRAME_SIZE

 #ifdef CONFIG_LIVEPATCH
-/* Based on the cmpd above, if the NIP was altered handle livepatch */
+/*
+ * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+ * not on a kprobe/jprobe, then handle livepatch.
+ */
 bne- livepatch_handler
 #endif

@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 break;
 case KVM_REG_PPC_TB_OFFSET:
+/*
+ * POWER9 DD1 has an erratum where writing TBU40 causes
+ * the timebase to lose ticks. So we don't let the
+ * timebase offset be changed on P9 DD1. (It is
+ * initialized to zero.)
+ */
+if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+break;
 /* round up to multiple of 2^24 */
 vcpu->arch.vcore->tb_offset =
 ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 int r;
 int srcu_idx;
+unsigned long ebb_regs[3] = {}; /* shut up GCC */
+unsigned long user_tar = 0;
+unsigned int user_vrsave;

 if (!vcpu->arch.sane) {
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 return -EINVAL;
 }

+/*
+ * Don't allow entry with a suspended transaction, because
+ * the guest entry/exit code will lose it.
+ * If the guest has TM enabled, save away their TM-related SPRs
+ * (they will get restored by the TM unavailable interrupt).
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+    (current->thread.regs->msr & MSR_TM)) {
+if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+run->fail_entry.hardware_entry_failure_reason = 0;
+return -EINVAL;
+}
+current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+current->thread.regs->msr &= ~MSR_TM;
+}
+#endif
+
 kvmppc_core_prepare_to_enter(vcpu);

 /* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)

 flush_all_to_thread(current);

+/* Save userspace EBB and other register values */
+if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ebb_regs[0] = mfspr(SPRN_EBBHR);
+ebb_regs[1] = mfspr(SPRN_EBBRR);
+ebb_regs[2] = mfspr(SPRN_BESCR);
+user_tar = mfspr(SPRN_TAR);
+}
+user_vrsave = mfspr(SPRN_VRSAVE);
+
 vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 vcpu->arch.pgdir = current->mm->pgd;
 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 }
 } while (is_kvmppc_resume_guest(r));

+/* Restore userspace EBB and other register values */
+if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+mtspr(SPRN_EBBHR, ebb_regs[0]);
+mtspr(SPRN_EBBRR, ebb_regs[1]);
+mtspr(SPRN_BESCR, ebb_regs[2]);
+mtspr(SPRN_TAR, user_tar);
+mtspr(SPRN_FSCR, current->thread.fscr);
+}
+mtspr(SPRN_VRSAVE, user_vrsave);
+
 out:
 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 atomic_dec(&vcpu->kvm->arch.vcpus_running);
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 * Put whatever is in the decrementer into the
 * hypervisor decrementer.
 */
+BEGIN_FTR_SECTION
+ld r5, HSTATE_KVM_VCORE(r13)
+ld r6, VCORE_KVM(r5)
+ld r9, KVM_HOST_LPCR(r6)
+andis. r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 mfspr r8,SPRN_DEC
 mftb r7
-mtspr SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+bne 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 extsw r8,r8
+32: mtspr SPRN_HDEC,r8
 add r8,r8,r7
 std r8,HSTATE_DECEXP(r13)

@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>

+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg) \
+BEGIN_FTR_SECTION; \
+extsw reg, reg; \
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE 1
 #define NAPPING_NOVCPU 2

+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS 144
+#define STACK_SLOT_TRAP (SFS-4)
+#define STACK_SLOT_TID (SFS-16)
+#define STACK_SLOT_PSSCR (SFS-24)
+#define STACK_SLOT_PID (SFS-32)
+#define STACK_SLOT_IAMR (SFS-40)
+#define STACK_SLOT_CIABR (SFS-48)
+#define STACK_SLOT_DAWR (SFS-56)
+#define STACK_SLOT_DAWRX (SFS-64)
+
 /*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
 /* We handle this much like a ceded vcpu */
 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+/* HDEC value came from DEC in the first place, it will fit */
 mfspr r3, SPRN_HDEC
 mtspr SPRN_DEC, r3
 /*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:

 /* See if our timeslice has expired (HDEC is negative) */
 mfspr r0, SPRN_HDEC
+EXTEND_HDEC(r0)
 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-cmpwi r0, 0
+cmpdi r0, 0
 blt kvm_novcpu_exit

 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
 bl kvmhv_accumulate_time
 #endif
 13: mr r3, r12
-stw r12, 112-4(r1)
+stw r12, STACK_SLOT_TRAP(r1)
 bl kvmhv_commence_exit
 nop
-lwz r12, 112-4(r1)
+lwz r12, STACK_SLOT_TRAP(r1)
 b kvmhv_switch_to_host

 /*
|
@ -390,8 +410,8 @@ kvm_secondary_got_guest:
|
|||
lbz r4, HSTATE_PTID(r13)
|
||||
cmpwi r4, 0
|
||||
bne 63f
|
||||
lis r6, 0x7fff
|
||||
ori r6, r6, 0xffff
|
||||
LOAD_REG_ADDR(r6, decrementer_max)
|
||||
ld r6, 0(r6)
|
||||
mtspr SPRN_HDEC, r6
|
||||
/* and set per-LPAR registers, if doing dynamic micro-threading */
|
||||
ld r6, HSTATE_SPLIT_MODE(r13)
|
||||
|
@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
|
|||
* *
|
||||
*****************************************************************************/
|
||||
|
||||
/* Stack frame offsets */
|
||||
#define STACK_SLOT_TID (112-16)
|
||||
#define STACK_SLOT_PSSCR (112-24)
|
||||
#define STACK_SLOT_PID (112-32)
|
||||
|
||||
.global kvmppc_hv_entry
|
||||
kvmppc_hv_entry:
|
||||
|
||||
|
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
 */
 mflr r0
 std r0, PPC_LR_STKOFF(r1)
-stdu r1, -112(r1)
+stdu r1, -SFS(r1)

 /* Save R1 in the PACA */
 std r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
 mfspr r5, SPRN_TIDR
 mfspr r6, SPRN_PSSCR
 mfspr r7, SPRN_PID
+mfspr r8, SPRN_IAMR
 std r5, STACK_SLOT_TID(r1)
 std r6, STACK_SLOT_PSSCR(r1)
 std r7, STACK_SLOT_PID(r1)
+std r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+mfspr r5, SPRN_CIABR
+mfspr r6, SPRN_DAWR
+mfspr r7, SPRN_DAWRX
+std r5, STACK_SLOT_CIABR(r1)
+std r6, STACK_SLOT_DAWR(r1)
+std r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

 BEGIN_FTR_SECTION
 /* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

 /* Check if HDEC expires soon */
 mfspr r3, SPRN_HDEC
-cmpwi r3, 512 /* 1 microsecond */
+EXTEND_HDEC(r3)
+cmpdi r3, 512 /* 1 microsecond */
 blt hdec_soon

 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 * set by the guest could disrupt the host.
 */
 li r0, 0
-mtspr SPRN_IAMR, r0
-mtspr SPRN_CIABR, r0
-mtspr SPRN_DAWRX, r0
 mtspr SPRN_PSPB, r0
 mtspr SPRN_WORT, r0
 BEGIN_FTR_SECTION
+mtspr SPRN_IAMR, r0
 mtspr SPRN_TCSCR, r0
 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 li r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 std r6,VCPU_UAMOR(r9)
 li r6,0
 mtspr SPRN_AMR,r6
+mtspr SPRN_UAMOR, r6

 /* Switch DSCR back to host value */
 mfspr r8, SPRN_DSCR
@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 ptesync

 /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+ld r5, STACK_SLOT_CIABR(r1)
+ld r6, STACK_SLOT_DAWR(r1)
+ld r7, STACK_SLOT_DAWRX(r1)
+mtspr SPRN_CIABR, r5
+mtspr SPRN_DAWR, r6
+mtspr SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
 ld r5, STACK_SLOT_TID(r1)
 ld r6, STACK_SLOT_PSSCR(r1)
 ld r7, STACK_SLOT_PID(r1)
+ld r8, STACK_SLOT_IAMR(r1)
 mtspr SPRN_TIDR, r5
 mtspr SPRN_PSSCR, r6
 mtspr SPRN_PID, r7
+mtspr SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 li r0, KVM_GUEST_MODE_NONE
 stb r0, HSTATE_IN_GUEST(r13)

-ld r0, 112+PPC_LR_STKOFF(r1)
-addi r1, r1, 112
+ld r0, SFS+PPC_LR_STKOFF(r1)
+addi r1, r1, SFS
 mtlr r0
 blr

@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 mfspr r3, SPRN_DEC
 mfspr r4, SPRN_HDEC
 mftb r5
-cmpw r3, r4
+extsw r3, r3
+EXTEND_HDEC(r4)
+cmpd r3, r4
 ble 67f
 mtspr SPRN_DEC, r4
 67:
 /* save expiry time of guest decrementer */
-extsw r3, r3
 add r3, r3, r5
 ld r4, HSTATE_KVM_VCPU(r13)
 ld r5, HSTATE_KVM_VCORE(r13)
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 struct pt_regs *regs_user_copy)
 {
 regs_user->regs = task_pt_regs(current);
-regs_user->abi = perf_reg_abi(current);
+regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+                 PERF_SAMPLE_REGS_ABI_NONE;
 }
@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
 return mmio_atsd_reg;
 }

-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
 unsigned long launch;

@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
 /* PID */
 launch |= pid << PPC_BITLSHIFT(38);

+/* No flush */
+launch |= !flush << PPC_BITLSHIFT(39);
+
 /* Invalidating the entire process doesn't use a va */
 return mmio_launch_invalidate(npu, launch, 0);
 }

 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-unsigned long pid)
+unsigned long pid, bool flush)
 {
 unsigned long launch;

@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
 /* PID */
 launch |= pid << PPC_BITLSHIFT(38);

+/* No flush */
+launch |= !flush << PPC_BITLSHIFT(39);
+
 return mmio_launch_invalidate(npu, launch, va);
 }

 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

+struct mmio_atsd_reg {
+struct npu *npu;
+int reg;
+};
+
+static void mmio_invalidate_wait(
+struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+struct npu *npu;
+int i, reg;
+
+/* Wait for all invalidations to complete */
+for (i = 0; i <= max_npu2_index; i++) {
+if (mmio_atsd_reg[i].reg < 0)
+continue;
+
+/* Wait for completion */
+npu = mmio_atsd_reg[i].npu;
+reg = mmio_atsd_reg[i].reg;
+while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+cpu_relax();
+
+put_mmio_atsd_reg(npu, reg);
+
+/*
+ * The GPU requires two flush ATSDs to ensure all entries have
+ * been flushed. We use PID 0 as it will never be used for a
+ * process on the GPU.
+ */
+if (flush)
+mmio_invalidate_pid(npu, 0, true);
+}
+}
+
 /*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-unsigned long address)
+unsigned long address, bool flush)
 {
-int i, j, reg;
+int i, j;
 struct npu *npu;
 struct pnv_phb *nphb;
 struct pci_dev *npdev;
-struct {
-struct npu *npu;
-int reg;
-} mmio_atsd_reg[NV_MAX_NPUS];
+struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
 unsigned long pid = npu_context->mm->context.id;

 /*
@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,

 if (va)
 mmio_atsd_reg[i].reg =
-mmio_invalidate_va(npu, address, pid);
+mmio_invalidate_va(npu, address, pid,
+flush);
 else
 mmio_atsd_reg[i].reg =
-mmio_invalidate_pid(npu, pid);
+mmio_invalidate_pid(npu, pid, flush);

 /*
 * The NPU hardware forwards the shootdown to all GPUs
@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 */
 flush_tlb_mm(npu_context->mm);

-/* Wait for all invalidations to complete */
-for (i = 0; i <= max_npu2_index; i++) {
-if (mmio_atsd_reg[i].reg < 0)
-continue;
-
-/* Wait for completion */
-npu = mmio_atsd_reg[i].npu;
-reg = mmio_atsd_reg[i].reg;
-while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-cpu_relax();
-put_mmio_atsd_reg(npu, reg);
-}
+mmio_invalidate_wait(mmio_atsd_reg, flush);
+if (flush)
+/* Wait for the flush to complete */
+mmio_invalidate_wait(mmio_atsd_reg, false);
 }

 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
 * There should be no more translation requests for this PID, but we
 * need to ensure any entries for it are removed from the TLB.
 */
-mmio_invalidate(npu_context, 0, 0);
+mmio_invalidate(npu_context, 0, 0, true);
 }

 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
 struct npu_context *npu_context = mn_to_npu_context(mn);

-mmio_invalidate(npu_context, 1, address);
+mmio_invalidate(npu_context, 1, address, true);
 }

 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
 struct npu_context *npu_context = mn_to_npu_context(mn);

-mmio_invalidate(npu_context, 1, address);
+mmio_invalidate(npu_context, 1, address, true);
 }

 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 struct npu_context *npu_context = mn_to_npu_context(mn);
 unsigned long address;

-for (address = start; address <= end; address += PAGE_SIZE)
-mmio_invalidate(npu_context, 1, address);
+for (address = start; address < end; address += PAGE_SIZE)
+mmio_invalidate(npu_context, 1, address, false);
+
+/* Do the flush only on the final addess == end */
+mmio_invalidate(npu_context, 1, address, true);
 }

 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 /* No nvlink associated with this GPU device */
 return ERR_PTR(-ENODEV);

-if (!mm) {
-/* kernel thread contexts are not supported */
+if (!mm || mm->context.id == 0) {
+/*
+ * Kernel thread contexts are not supported and context id 0 is
+ * reserved on the GPU.
+ */
 return ERR_PTR(-EINVAL);
 }

@@ -221,11 +221,6 @@ extern void release_thread(struct task_struct *);
 /* Free guarded storage control block for current */
 void exit_thread_gs(void);

-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
 (task_stack_page(tsk) + THREAD_SIZE) - 1)
@@ -564,8 +564,6 @@ static struct kset *ipl_kset;

 static void __ipl_run(void *unused)
 {
-if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
-diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
 diag308(DIAG308_LOAD_CLEAR, NULL);
 if (MACHINE_IS_VM)
 __cpcmd("IPL", NULL, 0, NULL);
@@ -1088,10 +1086,7 @@ static void __reipl_run(void *unused)
 break;
 case REIPL_METHOD_CCW_DIAG:
 diag308(DIAG308_SET, reipl_block_ccw);
-if (MACHINE_IS_LPAR)
-diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
-else
-diag308(DIAG308_LOAD_CLEAR, NULL);
+diag308(DIAG308_LOAD_CLEAR, NULL);
 break;
 case REIPL_METHOD_FCP_RW_DIAG:
 diag308(DIAG308_SET, reipl_block_fcp);
@@ -41,31 +41,6 @@

 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

-/*
- * Return saved PC of a blocked thread. used in kernel/sched.
- * resume in entry.S does not create a new stack frame, it
- * just stores the registers %r6-%r15 to the frame given by
- * schedule. We want to return the address of the caller of
- * schedule, so we have to walk the backchain one time to
- * find the frame schedule() store its return address.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-struct stack_frame *sf, *low, *high;
-
-if (!tsk || !task_stack_page(tsk))
-return 0;
-low = task_stack_page(tsk);
-high = (struct stack_frame *) task_pt_regs(tsk);
-sf = (struct stack_frame *) tsk->thread.ksp;
-if (sf <= low || sf > high)
-return 0;
-sf = (struct stack_frame *) sf->back_chain;
-if (sf <= low || sf > high)
-return 0;
-return sf->gprs[8];
-}
-
 extern void kernel_thread_starter(void);

 /*
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 ptr = asce.origin * 4096;
 if (asce.r) {
 *fake = 1;
+ptr = 0;
 asce.dt = ASCE_TYPE_REGION1;
 }
 switch (asce.dt) {
 case ASCE_TYPE_REGION1:
-if (vaddr.rfx01 > asce.tl && !asce.r)
+if (vaddr.rfx01 > asce.tl && !*fake)
 return PGM_REGION_FIRST_TRANS;
 break;
 case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 union region1_table_entry rfte;

 if (*fake) {
-/* offset in 16EB guest memory block */
-ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+ptr += (unsigned long) vaddr.rfx << 53;
 rfte.val = ptr;
 goto shadow_r2t;
 }
@@ -1036,8 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 union region2_table_entry rste;

 if (*fake) {
-/* offset in 8PB guest memory block */
-ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+ptr += (unsigned long) vaddr.rsx << 42;
 rste.val = ptr;
 goto shadow_r3t;
 }
@@ -1064,8 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 union region3_table_entry rtte;

 if (*fake) {
-/* offset in 4TB guest memory block */
-ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+ptr += (unsigned long) vaddr.rtx << 31;
 rtte.val = ptr;
 goto shadow_sgt;
 }
@@ -1101,8 +1099,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 union segment_table_entry ste;

 if (*fake) {
-/* offset in 2G guest memory block */
-ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+ptr += (unsigned long) vaddr.sx << 20;
 ste.val = ptr;
 goto shadow_pgt;
 }
@@ -13,7 +13,6 @@ struct task_struct;
 */
 extern void (*cpu_wait)(void);

-extern unsigned long thread_saved_pc(struct task_struct *tsk);
 extern void start_thread(struct pt_regs *regs,
 unsigned long pc, unsigned long sp);
 extern unsigned long get_wchan(struct task_struct *p);
@@ -101,11 +101,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
 return 1;
 }

-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-return task_pt_regs(tsk)->cp0_epc;
-}
-
 unsigned long get_wchan(struct task_struct *task)
 {
 if (!task || task == current || task->state == TASK_RUNNING)
@@ -67,9 +67,6 @@ struct thread_struct {
 .current_ds = KERNEL_DS, \
 }

-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 unsigned long sp)
@@ -89,9 +89,7 @@ struct thread_struct {
 #include <linux/types.h>
 #include <asm/fpumacro.h>

-/* Return saved PC of a blocked thread. */
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);

 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP
@@ -176,14 +176,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 printk("\n");
 }

-/*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-return task_thread_info(tsk)->kpc;
-}
-
 /*
 * Free current thread data structures etc..
 */
@@ -400,25 +400,6 @@ core_initcall(sparc_sysrq_init);

 #endif

-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-struct thread_info *ti = task_thread_info(tsk);
-unsigned long ret = 0xdeadbeefUL;
-
-if (ti && ti->ksp) {
-unsigned long *sp;
-sp = (unsigned long *)(ti->ksp + STACK_BIAS);
-if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
-sp[14]) {
-unsigned long *fp;
-fp = (unsigned long *)(sp[14] + STACK_BIAS);
-if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
-ret = fp[15];
-}
-}
-return ret;
-}
-
 /* Free current thread data structures etc.. */
 void exit_thread(struct task_struct *tsk)
 {
@@ -214,13 +214,6 @@ static inline void release_thread(struct task_struct *dead_task)

 extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);

-
-/*
- * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
- */
-#define thread_saved_pc(t) ((t)->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);

 /* Return initial ksp value for given task. */
@@ -58,8 +58,6 @@ static inline void release_thread(struct task_struct *task)
 {
 }

-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 static inline void mm_copy_segments(struct mm_struct *from_mm,
 struct mm_struct *new_mm)
 {
@@ -56,12 +56,6 @@ union thread_union cpu0_irqstack
 __attribute__((__section__(".data..init_irqstack"))) =
 { INIT_THREAD_INFO(init_task) };

-unsigned long thread_saved_pc(struct task_struct *task)
-{
-/* FIXME: Need to look up userspace_pid by cpu */
-return os_process_pc(userspace_pid[0]);
-}
-
 /* Changed in setup_arch, which is called in early boot */
 static char host_info[(__NEW_UTS_LEN + 1) * 5];

@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
 [ C(DTLB) ] = {
 [ C(OP_READ) ] = {
 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
-[ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
+[ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
 },
 [ C(OP_WRITE) ] = {
 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
-[ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */
+[ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
 },
 [ C(OP_PREFETCH) ] = {
 [ C(RESULT_ACCESS) ] = 0x0,
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {

 bool perm_ok; /* do not check permissions if true */
 bool ud; /* inject an #UD if host doesn't support insn */
+bool tf; /* TF value before instruction (after for syscall/sysret) */

 bool have_exception;
 struct x86_exception exception;
@@ -2,8 +2,7 @@
 #define _ASM_X86_MSHYPER_H

 #include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
+#include <linux/atomic.h>
 #include <asm/hyperv.h>

 /*
@@ -860,8 +860,6 @@ extern unsigned long KSTK_ESP(struct task_struct *task);

 #endif /* CONFIG_X86_64 */

-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 unsigned long new_sp);

@@ -544,17 +544,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 return randomize_page(mm->brk, 0x02000000);
 }

-/*
- * Return saved PC of a blocked thread.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-struct inactive_task_frame *frame =
-(struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
-return READ_ONCE_NOCHECK(frame->ret_addr);
-}
-
 /*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
 }

+ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
 return X86EMUL_CONTINUE;
 }

@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

 ctxt->eflags = kvm_get_rflags(vcpu);
+ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
 ctxt->eip = kvm_rip_read(vcpu);
 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 return dr6;
 }

-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
 struct kvm_run *kvm_run = vcpu->run;

-/*
- * rflags is the old, "raw" value of the flags. The new value has
- * not been saved yet.
- *
- * This is correct even for TF set by the guest, because "the
- * processor will not generate this exception after the instruction
- * that sets the TF flag".
- */
-if (unlikely(rflags & X86_EFLAGS_TF)) {
-if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-DR6_RTM;
-kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-kvm_run->debug.arch.exception = DB_VECTOR;
-kvm_run->exit_reason = KVM_EXIT_DEBUG;
-*r = EMULATE_USER_EXIT;
-} else {
-/*
- * "Certain debug exceptions may clear bit 0-3. The
- * remaining contents of the DR6 register are never
- * cleared by the processor".
- */
-vcpu->arch.dr6 &= ~15;
-vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-kvm_queue_exception(vcpu, DB_VECTOR);
-}
+if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+kvm_run->debug.arch.exception = DB_VECTOR;
+kvm_run->exit_reason = KVM_EXIT_DEBUG;
+*r = EMULATE_USER_EXIT;
+} else {
+/*
+ * "Certain debug exceptions may clear bit 0-3. The
+ * remaining contents of the DR6 register are never
+ * cleared by the processor".
+ */
+vcpu->arch.dr6 &= ~15;
+vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+kvm_queue_exception(vcpu, DB_VECTOR);
+}
 }

@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
 int r = EMULATE_DONE;

 kvm_x86_ops->skip_emulated_instruction(vcpu);
-kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+/*
+ * rflags is the old, "raw" value of the flags. The new value has
+ * not been saved yet.
+ *
+ * This is correct even for TF set by the guest, because "the
+ * processor will not generate this exception after the instruction
+ * that sets the TF flag".
+ */
+if (unlikely(rflags & X86_EFLAGS_TF))
+kvm_vcpu_do_singlestep(vcpu, &r);
 return r == EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 toggle_interruptibility(vcpu, ctxt->interruptibility);
 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 kvm_rip_write(vcpu, ctxt->eip);
-if (r == EMULATE_DONE)
-kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+if (r == EMULATE_DONE &&
+    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+kvm_vcpu_do_singlestep(vcpu, &r);
 if (!ctxt->have_exception ||
 exception_type(ctxt->exception.vector) == EXCPT_TRAP)
 __kvm_set_rflags(vcpu, ctxt->eflags);
@@ -213,8 +213,6 @@ struct mm_struct;
 #define release_segments(mm) do { } while(0)
 #define forget_segments() do { } while (0)

-#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
-
 extern unsigned long get_wchan(struct task_struct *p);

 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
block/bio.c
@@ -240,20 +240,21 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 return bvl;
 }

-static void __bio_free(struct bio *bio)
+void bio_uninit(struct bio *bio)
 {
 bio_disassociate_task(bio);

 if (bio_integrity(bio))
 bio_integrity_free(bio);
 }
+EXPORT_SYMBOL(bio_uninit);

 static void bio_free(struct bio *bio)
 {
 struct bio_set *bs = bio->bi_pool;
 void *p;

-__bio_free(bio);
+bio_uninit(bio);

 if (bs) {
 bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
@ -271,6 +272,11 @@ static void bio_free(struct bio *bio)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Users of this function have their own bio allocation. Subsequently,
|
||||
* they must remember to pair any call to bio_init() with bio_uninit()
|
||||
* when IO has completed, or when the bio is released.
|
||||
*/
|
||||
void bio_init(struct bio *bio, struct bio_vec *table,
|
||||
unsigned short max_vecs)
|
||||
{
|
||||
|
@ -297,7 +303,7 @@ void bio_reset(struct bio *bio)
|
|||
{
|
||||
unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
|
||||
|
||||
__bio_free(bio);
|
||||
bio_uninit(bio);
|
||||
|
||||
memset(bio, 0, BIO_RESET_BYTES);
|
||||
bio->bi_flags = flags;
|
||||
|
|
|
@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
    __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
    if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
        return;

    if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
        struct request_queue *q = hctx->queue;

        if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
            atomic_inc(&q->shared_hctx_restart);
    } else
        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
    if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
        return false;

    if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
        struct request_queue *q = hctx->queue;

        if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
            atomic_dec(&q->shared_hctx_restart);
    } else
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

    if (blk_mq_hctx_has_pending(hctx)) {
        blk_mq_run_hw_queue(hctx, true);
        return true;
    }

    return false;
}

struct request *blk_mq_sched_get_request(struct request_queue *q,
                     struct bio *bio,
                     unsigned int op,

@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
    return true;
}

static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
    if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
        if (blk_mq_hctx_has_pending(hctx)) {
            blk_mq_run_hw_queue(hctx, true);
            return true;
        }
    }
    return false;
}

/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos: loop cursor.

@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
    unsigned int i, j;

    if (set->flags & BLK_MQ_F_TAG_SHARED) {
        /*
         * If this is 0, then we know that no hardware queues
         * have RESTART marked. We're done.
         */
        if (!atomic_read(&queue->shared_hctx_restart))
            return;

        rcu_read_lock();
        list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
                       tag_set_list) {
@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
    return false;
}

/*
 * Mark a hardware queue as needing a restart.
 */
static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
    if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
    return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
    }
}

/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
    struct blk_mq_hw_ctx *hctx;
    int i;

    queue_for_each_hw_ctx(q, hctx, i) {
        if (shared)
        if (shared) {
            if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                atomic_inc(&q->shared_hctx_restart);
            hctx->flags |= BLK_MQ_F_TAG_SHARED;
        else
        } else {
            if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                atomic_dec(&q->shared_hctx_restart);
            hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
        }
    }
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
                    bool shared)
{
    struct request_queue *q;
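The blk-mq hunks above pair the per-queue RESTART bit with a shared counter so that blk_mq_sched_restart() can skip scanning the whole tag set when nothing is marked. A minimal userspace sketch of that pattern, using a simplified queue structure and C11 atomics instead of the kernel's bitops (all names here are hypothetical stand-ins, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures. */
struct hw_queue {
    atomic_bool needs_restart;          /* per-queue RESTART flag */
    bool shared_tags;                   /* queue shares tags with others */
};

static atomic_int shared_restart_count; /* how many shared queues are marked */

static void mark_restart(struct hw_queue *q)
{
    /* Only the false -> true transition bumps the shared counter. */
    if (!atomic_exchange(&q->needs_restart, true) && q->shared_tags)
        atomic_fetch_add(&shared_restart_count, 1);
}

static bool clear_restart(struct hw_queue *q)
{
    if (!atomic_exchange(&q->needs_restart, false))
        return false;                   /* was not marked */
    if (q->shared_tags)
        atomic_fetch_sub(&shared_restart_count, 1);
    return true;                        /* caller would re-run the queue */
}

int main(void)
{
    struct hw_queue q = { .shared_tags = true };

    mark_restart(&q);
    mark_restart(&q);                   /* second mark is a no-op */

    /* Fast path: nothing to do when the counter reads zero. */
    if (atomic_load(&shared_restart_count) == 0)
        puts("no queues marked, skip the scan");
    else
        printf("restarted: %d\n", clear_restart(&q));
    return 0;
}

The counter only changes on flag transitions, so readers of the fast path never see it drift even when mark/clear race on the same queue.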
@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
    adev->flags.coherent_dma = cca;
}

static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
{
    bool *is_spi_i2c_slave_p = data;

    if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
        return 1;

    /*
     * devices that are connected to UART still need to be enumerated to
     * platform bus
     */
    if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
        *is_spi_i2c_slave_p = true;

    /* no need to do more checking */
    return -1;
}

static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
{
    struct list_head resource_list;
    bool is_spi_i2c_slave = false;

    INIT_LIST_HEAD(&resource_list);
    acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
                   &is_spi_i2c_slave);
    acpi_dev_free_resource_list(&resource_list);

    return is_spi_i2c_slave;
}

void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
                 int type, unsigned long long sta)
{

@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
    acpi_bus_get_flags(device);
    device->flags.match_driver = false;
    device->flags.initialized = true;
    device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
    acpi_device_clear_enumerated(device);
    device_initialize(&device->dev);
    dev_set_uevent_suppress(&device->dev, true);

@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
    return AE_OK;
}

static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
{
    bool *is_spi_i2c_slave_p = data;

    if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
        return 1;

    /*
     * devices that are connected to UART still need to be enumerated to
     * platform bus
     */
    if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
        *is_spi_i2c_slave_p = true;

    /* no need to do more checking */
    return -1;
}

static void acpi_default_enumeration(struct acpi_device *device)
{
    struct list_head resource_list;
    bool is_spi_i2c_slave = false;

    /*
     * Do not enumerate SPI/I2C slaves as they will be enumerated by their
     * respective parents.
     */
    INIT_LIST_HEAD(&resource_list);
    acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
                   &is_spi_i2c_slave);
    acpi_dev_free_resource_list(&resource_list);
    if (!is_spi_i2c_slave) {
    if (!device->flags.spi_i2c_slave) {
        acpi_create_platform_device(device, NULL);
        acpi_device_set_enumerated(device);
    } else {
@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
        return;

    device->flags.match_driver = true;
    if (ret > 0) {
    if (ret > 0 && !device->flags.spi_i2c_slave) {
        acpi_device_set_enumerated(device);
        goto ok;
    }

@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
    if (ret < 0)
        return;

    if (device->pnp.type.platform_id)
        acpi_default_enumeration(device);
    else
    if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
        acpi_device_set_enumerated(device);
    else
        acpi_default_enumeration(device);

 ok:
    list_for_each_entry(child, &device->children, node)
@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
    unsigned long timeout;
    int ret;

    xen_blkif_get(blkif);

    set_freezable();
    while (!kthread_should_stop()) {
        if (try_to_freeze())

@ -665,7 +663,6 @@ int xen_blkif_schedule(void *arg)
        print_stats(ring);

    ring->xenblkd = NULL;
    xen_blkif_put(blkif);

    return 0;
}
@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
static void make_response(struct xen_blkif_ring *ring, u64 id,
              unsigned short op, int st)
{
    struct blkif_response resp;
    struct blkif_response *resp;
    unsigned long flags;
    union blkif_back_rings *blk_rings;
    int notify;

    resp.id = id;
    resp.operation = op;
    resp.status = st;

    spin_lock_irqsave(&ring->blk_ring_lock, flags);
    blk_rings = &ring->blk_rings;
    /* Place on the response ring for the relevant domain. */
    switch (ring->blkif->blk_protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
               &resp, sizeof(resp));
        resp = RING_GET_RESPONSE(&blk_rings->native,
                     blk_rings->native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
               &resp, sizeof(resp));
        resp = RING_GET_RESPONSE(&blk_rings->x86_32,
                     blk_rings->x86_32.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
               &resp, sizeof(resp));
        resp = RING_GET_RESPONSE(&blk_rings->x86_64,
                     blk_rings->x86_64.rsp_prod_pvt);
        break;
    default:
        BUG();
    }

    resp->id = id;
    resp->operation = op;
    resp->status = st;

    blk_rings->common.rsp_prod_pvt++;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
    spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
struct blkif_common_request {
    char dummy;
};
struct blkif_common_response {
    char dummy;
};

/* i386 protocol version */

struct blkif_x86_32_request_rw {
    uint8_t nr_segments; /* number of segments */

@ -129,14 +128,6 @@ struct blkif_x86_32_request {
    } u;
} __attribute__((__packed__));

/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
    uint64_t id; /* copied from request */
    uint8_t operation; /* copied from request */
    int16_t status; /* BLKIF_RSP_??? */
};
#pragma pack(pop)
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {

@ -193,18 +184,12 @@ struct blkif_x86_64_request {
    } u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
    uint64_t __attribute__((__aligned__(8))) id;
    uint8_t operation; /* copied from request */
    int16_t status; /* BLKIF_RSP_??? */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
          struct blkif_common_response);
          struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
          struct blkif_x86_32_response);
          struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
          struct blkif_x86_64_response);
          struct blkif_response);

union blkif_back_rings {
    struct blkif_back_ring native;

@ -281,6 +266,7 @@ struct xen_blkif_ring {

    wait_queue_head_t wq;
    atomic_t inflight;
    bool active;
    /* One thread per blkif ring. */
    struct task_struct *xenblkd;
    unsigned int waiting_reqs;
@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
        init_waitqueue_head(&ring->shutdown_wq);
        ring->blkif = blkif;
        ring->st_print = jiffies;
        xen_blkif_get(blkif);
        ring->active = true;
    }

    return 0;

@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
        struct xen_blkif_ring *ring = &blkif->rings[r];
        unsigned int i = 0;

        if (!ring->active)
            continue;

        if (ring->xenblkd) {
            kthread_stop(ring->xenblkd);
            wake_up(&ring->shutdown_wq);
            ring->xenblkd = NULL;
        }

        /* The above kthread_stop() guarantees that at this point we

@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
        BUG_ON(ring->free_pages_num != 0);
        BUG_ON(ring->persistent_gnt_c != 0);
        WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
        xen_blkif_put(blkif);
        ring->active = false;
    }
    blkif->nr_ring_pages = 0;
    /*

@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)

static void xen_blkif_free(struct xen_blkif *blkif)
{

    xen_blkif_disconnect(blkif);
    WARN_ON(xen_blkif_disconnect(blkif));
    xen_vbd_free(&blkif->vbd);
    kfree(blkif->be->mode);
    kfree(blkif->be);

    /* Make sure everything is drained before shutting down */
    kmem_cache_free(xen_blkif_cachep, blkif);

@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
        xen_blkif_put(be->blkif);
    }

    kfree(be->mode);
    kfree(be);
    return 0;
}
@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
        p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
        cp++; crng_init_cnt++; len--;
    }
    spin_unlock_irqrestore(&primary_crng.lock, flags);
    if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
        invalidate_batched_entropy();
        crng_init = 1;
        wake_up_interruptible(&crng_init_wait);
        pr_notice("random: fast init done\n");
    }
    spin_unlock_irqrestore(&primary_crng.lock, flags);
    return 1;
}

@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
    }
    memzero_explicit(&buf, sizeof(buf));
    crng->init_time = jiffies;
    spin_unlock_irqrestore(&primary_crng.lock, flags);
    if (crng == &primary_crng && crng_init < 2) {
        invalidate_batched_entropy();
        crng_init = 2;

@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
        wake_up_interruptible(&crng_init_wait);
        pr_notice("random: crng init done\n");
    }
    spin_unlock_irqrestore(&primary_crng.lock, flags);
}

static inline void crng_wait_ready(void)

@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
    u64 ret;
    bool use_lock = crng_init < 2;
    unsigned long flags;
    bool use_lock = READ_ONCE(crng_init) < 2;
    unsigned long flags = 0;
    struct batched_entropy *batch;

#if BITS_PER_LONG == 64

@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
    u32 ret;
    bool use_lock = crng_init < 2;
    unsigned long flags;
    bool use_lock = READ_ONCE(crng_init) < 2;
    unsigned long flags = 0;
    struct batched_entropy *batch;

    if (arch_get_random_int(&ret))
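The random.c hunks above keep the crng_init state change inside the region protected by primary_crng.lock and make the lockless readers use READ_ONCE(). The general rule being applied: state that unlocked readers consult is published before the lock is dropped, and the lockless peek is only used to decide whether the slow path is needed. A rough pthread-based sketch of that shape, with made-up names (this is not the kernel code itself):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int init_level;      /* 0 = empty, 1 = fast init, 2 = fully seeded */
static unsigned char key[32];      /* protected by state_lock */

/* Writer: mix in new material, raise init_level, and only then unlock. */
static void reseed(const unsigned char *seed, size_t len)
{
    pthread_mutex_lock(&state_lock);
    for (size_t i = 0; i < len; i++)
        key[i % sizeof(key)] ^= seed[i];
    atomic_store(&init_level, 2);          /* publish while still holding the lock */
    pthread_mutex_unlock(&state_lock);
}

/* Reader: a lockless peek decides whether the locked slow path is needed,
 * analogous to READ_ONCE(crng_init) < 2 in the hunk above. */
static int need_locked_path(void)
{
    return atomic_load(&init_level) < 2;
}

int main(void)
{
    unsigned char seed[4] = { 1, 2, 3, 4 };

    printf("before: locked path needed? %d\n", need_locked_path());
    reseed(seed, sizeof(seed));
    printf("after:  locked path needed? %d\n", need_locked_path());
    return 0;
}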
@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
        return 0;
    }

    rate = readl_relaxed(frame + CNTFRQ);
    rate = readl_relaxed(base + CNTFRQ);

    iounmap(frame);
    iounmap(base);

    return rate;
}

@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

@ -12,6 +12,7 @@

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
    u32 set;

    if (!of_device_is_compatible(mvchip->chip.of_node,
                     "marvell,armada-370-xp-gpio"))
                     "marvell,armada-370-gpio"))
        return 0;

    if (IS_ERR(mvchip->clk))

@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
        .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
    },
    {
        .compatible = "marvell,armada-370-xp-gpio",
        .compatible = "marvell,armada-370-gpio",
        .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
    },
    {

@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                 mvchip);
    }

    /* Armada 370/XP has simple PWM support for GPIO lines */
    /* Some MVEBU SoCs have simple PWM support for GPIO lines */
    if (IS_ENABLED(CONFIG_PWM))
        return mvebu_pwm_probe(pdev, mvchip, id);
@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
            DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
                 adev->clock.default_dispclk / 100);
            adev->clock.default_dispclk = 60000;
        } else if (adev->clock.default_dispclk <= 60000) {
            DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
                 adev->clock.default_dispclk / 100);
            adev->clock.default_dispclk = 62500;
        }
        adev->clock.dp_extclk =
            le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);

@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
    {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
    /* Vega 10 */
    {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
    struct drm_device *dev = crtc->dev;
    struct amdgpu_device *adev = dev->dev_private;
    int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
    ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
    ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;

    memset(&args, 0, sizeof(args));

@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
    int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
    ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
    ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;

    memset(&args, 0, sizeof(args));
@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
    if (!connector)
        return -ENOENT;

    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
    encoder = drm_connector_get_encoder(connector);
    if (encoder)
        out_resp->encoder_id = encoder->base.id;
    else
        out_resp->encoder_id = 0;

    ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
            (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
            (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
            &out_resp->count_props);
    drm_modeset_unlock(&dev->mode_config.connection_mutex);
    if (ret)
        goto out_unref;

    for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
        if (connector->encoder_ids[i] != 0)
            encoders_count++;

@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
            if (put_user(connector->encoder_ids[i],
                     encoder_ptr + copied)) {
                ret = -EFAULT;
                goto out_unref;
                goto out;
            }
            copied++;
        }

@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
            if (copy_to_user(mode_ptr + copied,
                     &u_mode, sizeof(u_mode))) {
                ret = -EFAULT;
                mutex_unlock(&dev->mode_config.mutex);

                goto out;
            }
            copied++;
        }
    }
    out_resp->count_modes = mode_count;
out:
    mutex_unlock(&dev->mode_config.mutex);
out_unref:

    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
    encoder = drm_connector_get_encoder(connector);
    if (encoder)
        out_resp->encoder_id = encoder->base.id;
    else
        out_resp->encoder_id = 0;

    /* Only grab properties after probing, to make sure EDID and other
     * properties reflect the latest status. */
    ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
            (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
            (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
            &out_resp->count_props);
    drm_modeset_unlock(&dev->mode_config.connection_mutex);

out:
    drm_connector_put(connector);

    return ret;
@ -106,9 +106,10 @@ struct etnaviv_gem_submit {
    struct etnaviv_gpu *gpu;
    struct ww_acquire_ctx ticket;
    struct dma_fence *fence;
    u32 flags;
    unsigned int nr_bos;
    struct etnaviv_gem_submit_bo bos[0];
    u32 flags;
    /* No new members here, the previous one is variable-length! */
};

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
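The etnaviv hunk moves the new flags field above bos[0] because a variable-length (flexible) array member has to stay the last member of its struct, and its storage is sized at allocation time. A small standalone illustration of that constraint, with made-up names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct submit {
    unsigned int flags;      /* new fields go before the array ... */
    unsigned int nr_bos;
    int bos[];               /* ... because the flexible member must be last */
};

static struct submit *submit_alloc(unsigned int nr_bos)
{
    /* The variable-length tail is sized when the object is allocated. */
    struct submit *s = malloc(sizeof(*s) + nr_bos * sizeof(s->bos[0]));
    if (!s)
        return NULL;
    memset(s, 0, sizeof(*s));
    s->nr_bos = nr_bos;
    return s;
}

int main(void)
{
    struct submit *s = submit_alloc(4);
    if (!s)
        return 1;
    for (unsigned int i = 0; i < s->nr_bos; i++)
        s->bos[i] = (int)i;
    printf("nr_bos=%u last=%d\n", s->nr_bos, s->bos[s->nr_bos - 1]);
    free(s);
    return 0;
}

The kernel spells the flexible member as bos[0] (the older GNU form); the C99 bos[] form above behaves the same way for this purpose.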
@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
    for (i = 0; i < submit->nr_bos; i++) {
        struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
        bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
        bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
        bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);

        ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
                         explicit);
@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
    struct file_stats *stats = data;
    struct i915_vma *vma;

    lockdep_assert_held(&obj->base.dev->struct_mutex);

    stats->count++;
    stats->total += obj->base.size;
    if (!obj->bind_count)

@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
        struct drm_i915_gem_request *request;
        struct task_struct *task;

        mutex_lock(&dev->struct_mutex);

        memset(&stats, 0, sizeof(stats));
        stats.file_priv = file->driver_priv;
        spin_lock(&file->table_lock);

@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
         * still alive (e.g. get_pid(current) => fork() => exit()).
         * Therefore, we need to protect this ->comm access using RCU.
         */
        mutex_lock(&dev->struct_mutex);
        request = list_first_entry_or_null(&file_priv->mm.request_list,
                           struct drm_i915_gem_request,
                           client_link);

@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                   PIDTYPE_PID);
        print_file_stats(m, task ? task->comm : "<unknown>", stats);
        rcu_read_unlock();

        mutex_unlock(&dev->struct_mutex);
    }
    mutex_unlock(&dev->filelist_mutex);
@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
    struct page *page;
    unsigned long last_pfn = 0; /* suppress gcc warning */
    unsigned int max_segment;
    gfp_t noreclaim;
    int ret;
    gfp_t gfp;

    /* Assert that the object is not currently in any GPU domain. As it
     * wasn't in the GTT, there shouldn't be any way it could have been in

@ -2315,22 +2315,31 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
     * Fail silently without starting the shrinker
     */
    mapping = obj->base.filp->f_mapping;
    gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
    gfp |= __GFP_NORETRY | __GFP_NOWARN;
    noreclaim = mapping_gfp_constraint(mapping,
                       ~(__GFP_IO | __GFP_RECLAIM));
    noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

    sg = st->sgl;
    st->nents = 0;
    for (i = 0; i < page_count; i++) {
        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
        if (unlikely(IS_ERR(page))) {
            i915_gem_shrink(dev_priv,
                    page_count,
                    I915_SHRINK_BOUND |
                    I915_SHRINK_UNBOUND |
                    I915_SHRINK_PURGEABLE);
        const unsigned int shrink[] = {
            I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
            0,
        }, *s = shrink;
        gfp_t gfp = noreclaim;

        do {
            page = shmem_read_mapping_page_gfp(mapping, i, gfp);
        }
        if (unlikely(IS_ERR(page))) {
            gfp_t reclaim;
            if (likely(!IS_ERR(page)))
                break;

            if (!*s) {
                ret = PTR_ERR(page);
                goto err_sg;
            }

            i915_gem_shrink(dev_priv, 2 * page_count, *s++);
            cond_resched();

            /* We've tried hard to allocate the memory by reaping
             * our own buffer, now let the real VM do its job and

@ -2340,15 +2349,26 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
             * defer the oom here by reporting the ENOMEM back
             * to userspace.
             */
            reclaim = mapping_gfp_mask(mapping);
            reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
            if (!*s) {
                /* reclaim and warn, but no oom */
                gfp = mapping_gfp_mask(mapping);

            page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
            if (IS_ERR(page)) {
                ret = PTR_ERR(page);
                goto err_sg;
                /* Our bo are always dirty and so we require
                 * kswapd to reclaim our pages (direct reclaim
                 * does not effectively begin pageout of our
                 * buffers on its own). However, direct reclaim
                 * only waits for kswapd when under allocation
                 * congestion. So as a result __GFP_RECLAIM is
                 * unreliable and fails to actually reclaim our
                 * dirty pages -- unless you try over and over
                 * again with !__GFP_NORETRY. However, we still
                 * want to fail this allocation rather than
                 * trigger the out-of-memory killer and for
                 * this we want the future __GFP_MAYFAIL.
                 */
            }
        }
        } while (1);

        if (!i ||
            sg->length >= max_segment ||
            page_to_pfn(page) != last_pfn + 1) {

@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)

    mapping = obj->base.filp->f_mapping;
    mapping_set_gfp_mask(mapping, mask);
    GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

    i915_gem_object_init(obj, &i915_gem_object_ops);
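The i915_gem_object_get_pages_gtt() rework above replaces a single shrink-and-retry with a loop over a table of shrinker passes, terminated by a 0 sentinel, after which a more permissive allocation mode is tried before giving up. The shape of that retry policy, sketched in plain C with a hypothetical allocator and reclaim step (none of these names come from the kernel):

#include <stdio.h>
#include <stdlib.h>

enum reclaim_pass { RECLAIM_NONE = 0, RECLAIM_LIGHT, RECLAIM_HEAVY };

/* Hypothetical allocator: pretend the cheap, non-blocking attempts fail. */
static void *try_alloc(size_t size, int allow_blocking)
{
    return allow_blocking ? malloc(size) : NULL;
}

static void reclaim(enum reclaim_pass pass)
{
    printf("reclaim pass %d\n", pass);   /* stand-in for a shrinker call */
}

static void *alloc_with_fallbacks(size_t size)
{
    /* Escalating passes, terminated by a 0 sentinel, mirroring the
     * shrink[] table in the hunk above. */
    const enum reclaim_pass passes[] = { RECLAIM_LIGHT, RECLAIM_HEAVY, RECLAIM_NONE };
    const enum reclaim_pass *p = passes;
    int allow_blocking = 0;              /* start in the cheap, non-blocking mode */

    for (;;) {
        void *ptr = try_alloc(size, allow_blocking);
        if (ptr)
            return ptr;
        if (*p == RECLAIM_NONE) {
            if (allow_blocking)
                return NULL;             /* every strategy exhausted */
            allow_blocking = 1;          /* last resort: allow the expensive mode */
            continue;
        }
        reclaim(*p++);                   /* free something up, then retry */
    }
}

int main(void)
{
    void *buf = alloc_with_fallbacks(4096);
    printf("allocation %s\n", buf ? "succeeded" : "failed");
    free(buf);
    return 0;
}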
@ -546,11 +546,12 @@ relocate_entry(struct drm_i915_gem_object *obj,
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
                   struct eb_vmas *eb,
                   struct drm_i915_gem_relocation_entry *reloc,
                   struct reloc_cache *cache)
{
    struct drm_i915_gem_object *obj = vma->obj;
    struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
    struct drm_gem_object *target_obj;
    struct drm_i915_gem_object *target_i915_obj;

@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        return -EINVAL;
    }

    /*
     * If we write into the object, we need to force the synchronisation
     * barrier, either with an asynchronous clflush or if we executed the
     * patching using the GPU (though that should be serialised by the
     * timeline). To be completely sure, and since we are required to
     * do relocations we are already stalling, disable the user's opt
     * of our synchronisation.
     */
    vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;

    ret = relocate_entry(obj, reloc, cache, target_offset);
    if (ret)
        return ret;

@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
    do {
        u64 offset = r->presumed_offset;

        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
        ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
        if (ret)
            goto out;

@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,

    reloc_cache_init(&cache, eb->i915);
    for (i = 0; i < entry->relocation_count; i++) {
        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
        ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
        if (ret)
            break;
    }
@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
     * GPU processing the request, we never over-estimate the
     * position of the head.
     */
    req->head = req->ring->tail;
    req->head = req->ring->emit;

    /* Check that we didn't interrupt ourselves with a new request */
    GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
    GEM_BUG_ON(freespace < wqi_size);

    /* The GuC firmware wants the tail index in QWords, not bytes */
    tail = rq->tail;
    assert_ring_tail_valid(rq->ring, rq->tail);
    tail >>= 3;
    tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
    GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);

    /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
            break;
    }

    if (!ret) {
        ret = i915_gem_active_retire(&vma->last_fence,
                         &vma->vm->i915->drm.struct_mutex);
    }

    __i915_vma_unpin(vma);
    if (ret)
        return ret;
@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
                     struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {

@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
    struct drm_crtc *crtc;
    int i, ret;

    intel_modeset_setup_hw_state(dev);
    intel_modeset_setup_hw_state(dev, ctx);
    i915_redisable_vga(to_i915(dev));

    if (!state)

@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
    intel_update_watermarks(intel_crtc);
}

static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
                    struct drm_modeset_acquire_ctx *ctx)
{
    struct intel_encoder *encoder;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        return;
    }

    state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
    state->acquire_ctx = ctx;

    /* Everything's already locked, -EDEADLK can't happen. */
    crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);

@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
    intel_setup_outputs(dev_priv);

    drm_modeset_lock_all(dev);
    intel_modeset_setup_hw_state(dev);
    intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
    drm_modeset_unlock_all(dev);

    for_each_intel_crtc(dev, crtc) {

@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
    return 0;
}

static void intel_enable_pipe_a(struct drm_device *dev)
static void intel_enable_pipe_a(struct drm_device *dev,
                struct drm_modeset_acquire_ctx *ctx)
{
    struct intel_connector *connector;
    struct drm_connector_list_iter conn_iter;
    struct drm_connector *crt = NULL;
    struct intel_load_detect_pipe load_detect_temp;
    struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
    int ret;

    /* We can't just switch on the pipe A, we need to set things up with a

@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
        (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}

static void intel_sanitize_crtc(struct intel_crtc *crtc)
static void intel_sanitize_crtc(struct intel_crtc *crtc,
                struct drm_modeset_acquire_ctx *ctx)
{
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = to_i915(dev);

@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
        plane = crtc->plane;
        crtc->base.primary->state->visible = true;
        crtc->plane = !plane;
        intel_crtc_disable_noatomic(&crtc->base);
        intel_crtc_disable_noatomic(&crtc->base, ctx);
        crtc->plane = plane;
    }

@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
         * resume. Force-enable the pipe to fix this, the update_dpms
         * call below we restore the pipe to the right state, but leave
         * the required bits on. */
        intel_enable_pipe_a(dev);
        intel_enable_pipe_a(dev, ctx);
    }

    /* Adjust the state of the output pipe according to whether we
     * have active connectors/encoders. */
    if (crtc->active && !intel_crtc_has_encoders(crtc))
        intel_crtc_disable_noatomic(&crtc->base);
        intel_crtc_disable_noatomic(&crtc->base, ctx);

    if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
        /*

@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
intel_modeset_setup_hw_state(struct drm_device *dev,
                 struct drm_modeset_acquire_ctx *ctx)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
    enum pipe pipe;

@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
    for_each_pipe(dev_priv, pipe) {
        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        intel_sanitize_crtc(crtc);
        intel_sanitize_crtc(crtc, ctx);
        intel_dump_pipe_config(crtc, crtc->config,
                       "[setup_hw_state]");
    }
@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
    struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
    struct intel_panel *panel = &connector->panel;

    intel_dp_aux_enable_backlight(connector);

    if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
        panel->backlight.max = 0xFFFF;
    else
@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
        rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
    u32 *reg_state = ce->lrc_reg_state;

    assert_ring_tail_valid(rq->ring, rq->tail);
    reg_state[CTX_RING_TAIL+1] = rq->tail;
    reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);

    /* True 32b PPGTT with dynamic page allocation: update PDP
     * registers and point the unallocated PDPs to scratch page.

@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
            ce->state->obj->mm.dirty = true;
            i915_gem_object_unpin_map(ce->state->obj);

            ce->ring->head = ce->ring->tail = 0;
            intel_ring_update_space(ce->ring);
            intel_ring_reset(ce->ring, 0);
        }
    }
}
@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)

void intel_ring_update_space(struct intel_ring *ring)
{
    ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
    ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
}

static int

@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)

    i915_gem_request_submit(request);

    assert_ring_tail_valid(request->ring, request->tail);
    I915_WRITE_TAIL(request->engine, request->tail);
    I915_WRITE_TAIL(request->engine,
            intel_ring_set_tail(request->ring, request->tail));
}

static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)

@ -1316,11 +1316,23 @@ int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
    return PTR_ERR(addr);
}

void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
    GEM_BUG_ON(!list_empty(&ring->request_list));
    ring->tail = tail;
    ring->head = tail;
    ring->emit = tail;
    intel_ring_update_space(ring);
}

void intel_ring_unpin(struct intel_ring *ring)
{
    GEM_BUG_ON(!ring->vma);
    GEM_BUG_ON(!ring->vaddr);

    /* Discard any unused bytes beyond that submitted to hw. */
    intel_ring_reset(ring, ring->tail);

    if (i915_vma_is_map_and_fenceable(ring->vma))
        i915_vma_unpin_iomap(ring->vma);
    else

@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
    struct intel_engine_cs *engine;
    enum intel_engine_id id;

    /* Restart from the beginning of the rings for convenience */
    for_each_engine(engine, dev_priv, id)
        engine->buffer->head = engine->buffer->tail;
        intel_ring_reset(engine->buffer, 0);
}

static int ring_request_alloc(struct drm_i915_gem_request *request)

@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
        unsigned space;

        /* Would completion of this request free enough space? */
        space = __intel_ring_space(target->postfix, ring->tail,
        space = __intel_ring_space(target->postfix, ring->emit,
                       ring->size);
        if (space >= bytes)
            break;

@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
    struct intel_ring *ring = req->ring;
    int remain_actual = ring->size - ring->tail;
    int remain_usable = ring->effective_size - ring->tail;
    int remain_actual = ring->size - ring->emit;
    int remain_usable = ring->effective_size - ring->emit;
    int bytes = num_dwords * sizeof(u32);
    int total_bytes, wait_bytes;
    bool need_wrap = false;

@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)

    if (unlikely(need_wrap)) {
        GEM_BUG_ON(remain_actual > ring->space);
        GEM_BUG_ON(ring->tail + remain_actual > ring->size);
        GEM_BUG_ON(ring->emit + remain_actual > ring->size);

        /* Fill the tail with MI_NOOP */
        memset(ring->vaddr + ring->tail, 0, remain_actual);
        ring->tail = 0;
        memset(ring->vaddr + ring->emit, 0, remain_actual);
        ring->emit = 0;
        ring->space -= remain_actual;
    }

    GEM_BUG_ON(ring->tail > ring->size - bytes);
    cs = ring->vaddr + ring->tail;
    ring->tail += bytes;
    GEM_BUG_ON(ring->emit > ring->size - bytes);
    cs = ring->vaddr + ring->emit;
    ring->emit += bytes;
    ring->space -= bytes;
    GEM_BUG_ON(ring->space < 0);

@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
    int num_dwords =
        (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
    u32 *cs;

    if (num_dwords == 0)
@ -145,6 +145,7 @@ struct intel_ring {

    u32 head;
    u32 tail;
    u32 emit;

    int space;
    int size;

@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
void intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
     * reserved for the command packet (i.e. the value passed to
     * intel_ring_begin()).
     */
    GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
    GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}

static inline u32

@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
    GEM_BUG_ON(tail >= ring->size);
}

void intel_ring_update_space(struct intel_ring *ring);
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
    /* Whilst writes to the tail are strictly order, there is no
     * serialisation between readers and the writers. The tail may be
     * read by i915_gem_request_retire() just as it is being updated
     * by execlists, as although the breadcrumb is complete, the context
     * switch hasn't been seen.
     */
    assert_ring_tail_valid(ring, tail);
    ring->tail = tail;
    return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
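The ring changes above split the CPU-side write cursor (ring->emit) from the value handed to the hardware (ring->tail), with intel_ring_set_tail() publishing the latter at submission time. A self-contained sketch of a byte ring with that split, assuming a single producer and omitting the real code's locking and wrap handling (all names below are invented for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 64   /* power of two, like the hardware rings */

struct ring {
    uint8_t  buf[RING_SIZE];
    uint32_t emit;     /* where the CPU writes the next command */
    uint32_t tail;     /* last position published to the "hardware" */
};

/* Reserve space and advance only the emit cursor. */
static void *ring_begin(struct ring *r, uint32_t bytes)
{
    assert(bytes <= RING_SIZE - r->emit);   /* wrap handling omitted */
    void *out = &r->buf[r->emit];
    r->emit += bytes;
    return out;
}

/* Publish the emit cursor as the new tail, the role intel_ring_set_tail()
 * plays in the hunks above. */
static uint32_t ring_submit(struct ring *r)
{
    assert(r->emit < RING_SIZE && (r->emit & 7) == 0);  /* basic sanity check */
    r->tail = r->emit;
    return r->tail;
}

int main(void)
{
    struct ring r = { {0}, 0, 0 };

    memset(ring_begin(&r, 16), 0, 16);      /* emit a 16-byte command */
    printf("emit=%u tail=%u (written, not yet visible)\n", r.emit, r.tail);

    ring_submit(&r);
    printf("emit=%u tail=%u (submitted)\n", r.emit, r.tail);
    return 0;
}

Keeping the two cursors separate means partially emitted commands are never exposed: free-space accounting tracks emit, while readers of tail only ever see fully submitted work.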
@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
        rdev->pdev->subsystem_vendor == 0x103c &&
        rdev->pdev->subsystem_device == 0x280a)
        return;
    /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume
     * - it hangs on resume inside the dynclk 1 table.
     */
    if (rdev->family == CHIP_RS400 &&
        rdev->pdev->subsystem_vendor == 0x1179 &&
        rdev->pdev->subsystem_device == 0xff31)
        return;

    /* DYN CLK 1 */
    table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);

@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
     * https://bugzilla.kernel.org/show_bug.cgi?id=51381
     */
    { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
    /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
     * https://bugs.freedesktop.org/show_bug.cgi?id=101491
     */
    { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
    /* macbook pro 8.2 */
    { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
    { 0, 0, 0, 0, 0 },
Some files were not shown because too many files have changed in this diff.