commit 1bc3cd4dfa
Author: Ingo Molnar <mingo@kernel.org>
Date:   2017-06-24 08:57:20 +02:00

    Merge branch 'linus' into sched/core, to pick up fixes

    Signed-off-by: Ingo Molnar <mingo@kernel.org>

119 changed files with 1157 additions and 664 deletions

@@ -41,9 +41,9 @@ Required properties:
 Optional properties:
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+properties are required.
+- compatible: Must contain "marvell,armada-370-gpio"
 - reg: an additional register set is needed, for the GPIO Blink
   Counter on/off registers.

@@ -71,7 +71,7 @@ Example:
 };
 gpio1: gpio@18140 {
-    compatible = "marvell,armada-370-xp-gpio";
+    compatible = "marvell,armada-370-gpio";
     reg = <0x18140 0x40>, <0x181c8 0x08>;
     reg-names = "gpio", "pwm";
     ngpios = <17>;


@@ -31,7 +31,7 @@ Example:
     compatible = "st,stm32-timers";
     reg = <0x40010000 0x400>;
     clocks = <&rcc 0 160>;
-    clock-names = "clk_int";
+    clock-names = "int";
     pwm {
         compatible = "st,stm32-pwm";


@@ -34,7 +34,7 @@ Required properties:
 "brcm,bcm6328-switch"
 "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
 Examples:


@@ -27,6 +27,7 @@ Optional properties:
 of the device. On many systems this is wired high so the device goes
 out of reset at power-on, but if it is under program control, this
 optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies

 Examples:


@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                           bool user, bool kernel)
 {
-    int idx_user, idx_kernel;
+    /*
+     * Initialize idx_user and idx_kernel to workaround bogus
+     * maybe-initialized warning when using GCC 6.
+     */
+    int idx_user = 0, idx_kernel = 0;
     unsigned long flags, old_entryhi;

     local_irq_save(flags);
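The GCC 6 false positive being worked around above has a common shape: a variable is assigned and later read under the same run-time condition, but the compiler cannot prove the two tests are correlated. A minimal stand-alone C sketch of that shape (hypothetical names, not code from this tree; compile with gcc-6 -O2 -Wall to see the warning):

/* Both reads are guarded by the same conditions as the writes, so no
 * uninitialized use can occur at run time, yet GCC 6 may still emit
 * "may be used uninitialized" here. Initializing both to 0, as the
 * hunk above does, silences the warning without changing behaviour. */
static int pick_index(int user, int kernel)
{
    int idx_user, idx_kernel;

    if (user)
        idx_user = 1;
    if (kernel)
        idx_kernel = 2;

    if (user)
        return idx_user;
    if (kernel)
        return idx_kernel;
    return -1;
}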


@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
                            struct kprobe_ctlblk *kcb);


@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
     .balign IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-    andis.  r0,r4,0xa410        /* weird error? */
+    andis.  r0,r4,0xa450        /* weird error? */
     bne-    handle_page_fault   /* if not, try to insert a HPTE */
-    andis.  r0,r4,DSISR_DABRMATCH@h
-    bne-    handle_dabr_fault
     CURRENT_THREAD_INFO(r11, r1)
     lwz     r0,TI_PREEMPT(r11)  /* If we're in an "NMI" */
     andis.  r0,r0,NMI_MASK@h    /* (i.e. an irq when soft-disabled) */

@@ -1438,11 +1436,16 @@ do_hash_page:
     /* Error */
     blt-    13f
+
+    /* Reload DSISR into r4 for the DABR check below */
+    ld      r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */

 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11: ld      r4,_DAR(r1)
+11: andis.  r0,r4,DSISR_DABRMATCH@h
+    bne-    handle_dabr_fault
+    ld      r4,_DAR(r1)
     ld      r5,_DSISR(r1)
     addi    r3,r1,STACK_FRAME_OVERHEAD
     bl      do_page_fault


@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

+int is_current_kprobe_addr(unsigned long addr)
+{
+    struct kprobe *p = kprobe_running();
+    return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
     return (addr >= (unsigned long)__kprobes_text_start &&

@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
     regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif

+    /*
+     * jprobes use jprobe_return() which skips the normal return
+     * path of the function, and this messes up the accounting of the
+     * function graph tracer.
+     *
+     * Pause function graph tracing while performing the jprobe function.
+     */
+    pause_graph_tracing();
+
     return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);

@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
      * saved regs...
      */
     memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+    /* It's OK to start function graph tracing again */
+    unpause_graph_tracing();
     preempt_enable_no_resched();
     return 1;
 }


@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
 }
 #endif

+/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+    ti->task = NULL;
+    ti->cpu = cpu;
+    ti->preempt_count = 0;
+    ti->local_flags = 0;
+    ti->flags = 0;
+    klp_init_thread_info(ti);
+}
+
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency

@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
      * Since we use these as temporary stacks during secondary CPU
      * bringup, we need to get at them in real mode. This means they
      * must also be within the RMO region.
+     *
+     * The IRQ stacks allocated elsewhere in this file are zeroed and
+     * initialized in kernel/irq.c. These are initialized here in order
+     * to have emergency stacks available as early as possible.
      */
     limit = min(safe_stack_limit(), ppc64_rma_size);

     for_each_possible_cpu(i) {
         struct thread_info *ti;
         ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-        klp_init_thread_info(ti);
+        memset(ti, 0, THREAD_SIZE);
+        emerg_stack_init_thread_info(ti, i);
         paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

 #ifdef CONFIG_PPC_BOOK3S_64
         /* emergency stack for NMI exception handling. */
         ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-        klp_init_thread_info(ti);
+        memset(ti, 0, THREAD_SIZE);
+        emerg_stack_init_thread_info(ti, i);
         paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;

         /* emergency stack for machine check exception handling. */
         ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-        klp_init_thread_info(ti);
+        memset(ti, 0, THREAD_SIZE);
+        emerg_stack_init_thread_info(ti, i);
         paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
     }


@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
     stdu    r1,-SWITCH_FRAME_SIZE(r1)

     /* Save all gprs to pt_regs */
-    SAVE_8GPRS(0,r1)
-    SAVE_8GPRS(8,r1)
-    SAVE_8GPRS(16,r1)
-    SAVE_8GPRS(24,r1)
+    SAVE_GPR(0, r1)
+    SAVE_10GPRS(2, r1)
+    SAVE_10GPRS(12, r1)
+    SAVE_10GPRS(22, r1)
+
+    /* Save previous stack pointer (r1) */
+    addi    r8, r1, SWITCH_FRAME_SIZE
+    std     r8, GPR1(r1)

     /* Load special regs for save below */
     mfmsr   r8

@@ -95,18 +99,44 @@ ftrace_call:
     bl      ftrace_stub
     nop

-    /* Load ctr with the possibly modified NIP */
-    ld      r3, _NIP(r1)
-    mtctr   r3
+    /* Load the possibly modified NIP */
+    ld      r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-    cmpd    r14,r3          /* has NIP been altered? */
+    cmpd    r14, r15        /* has NIP been altered? */
 #endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+    /* NIP has not been altered, skip over further checks */
+    beq     1f
+
+    /* Check if there is an active kprobe on us */
+    subi    r3, r14, 4
+    bl      is_current_kprobe_addr
+    nop
+
+    /*
+     * If r3 == 1, then this is a kprobe/jprobe.
+     * else, this is livepatched function.
+     *
+     * The conditional branch for livepatch_handler below will use the
+     * result of this comparison. For kprobe/jprobe, we just need to branch to
+     * the new NIP, not call livepatch_handler. The branch below is bne, so we
+     * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+     * CR0[EQ] = (r3 == 1).
+     */
+    cmpdi   r3, 1
+1:
+#endif
+
+    /* Load CTR with the possibly modified NIP */
+    mtctr   r15

     /* Restore gprs */
-    REST_8GPRS(0,r1)
-    REST_8GPRS(8,r1)
-    REST_8GPRS(16,r1)
-    REST_8GPRS(24,r1)
+    REST_GPR(0,r1)
+    REST_10GPRS(2,r1)
+    REST_10GPRS(12,r1)
+    REST_10GPRS(22,r1)

     /* Restore possibly modified LR */
     ld      r0, _LINK(r1)

@@ -119,7 +149,10 @@ ftrace_call:
     addi    r1, r1, SWITCH_FRAME_SIZE

 #ifdef CONFIG_LIVEPATCH
-    /* Based on the cmpd above, if the NIP was altered handle livepatch */
+    /*
+     * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+     * not on a kprobe/jprobe, then handle livepatch.
+     */
     bne-    livepatch_handler
 #endif


@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
         r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
         break;
     case KVM_REG_PPC_TB_OFFSET:
+        /*
+         * POWER9 DD1 has an erratum where writing TBU40 causes
+         * the timebase to lose ticks. So we don't let the
+         * timebase offset be changed on P9 DD1. (It is
+         * initialized to zero.)
+         */
+        if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+            break;
         /* round up to multiple of 2^24 */
         vcpu->arch.vcore->tb_offset =
             ALIGN(set_reg_val(id, *val), 1UL << 24);

@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
     int r;
     int srcu_idx;
+    unsigned long ebb_regs[3] = {};    /* shut up GCC */
+    unsigned long user_tar = 0;
+    unsigned int user_vrsave;

     if (!vcpu->arch.sane) {
         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
         return -EINVAL;
     }

+    /*
+     * Don't allow entry with a suspended transaction, because
+     * the guest entry/exit code will lose it.
+     * If the guest has TM enabled, save away their TM-related SPRs
+     * (they will get restored by the TM unavailable interrupt).
+     */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+    if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+        (current->thread.regs->msr & MSR_TM)) {
+        if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+            run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+            run->fail_entry.hardware_entry_failure_reason = 0;
+            return -EINVAL;
+        }
+        current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+        current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+        current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+        current->thread.regs->msr &= ~MSR_TM;
+    }
+#endif
+
     kvmppc_core_prepare_to_enter(vcpu);

     /* No need to go into the guest when all we'll do is come back out */

@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
     flush_all_to_thread(current);

+    /* Save userspace EBB and other register values */
+    if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+        ebb_regs[0] = mfspr(SPRN_EBBHR);
+        ebb_regs[1] = mfspr(SPRN_EBBRR);
+        ebb_regs[2] = mfspr(SPRN_BESCR);
+        user_tar = mfspr(SPRN_TAR);
+    }
+    user_vrsave = mfspr(SPRN_VRSAVE);
+
     vcpu->arch.wqp = &vcpu->arch.vcore->wq;
     vcpu->arch.pgdir = current->mm->pgd;
     vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
         }
     } while (is_kvmppc_resume_guest(r));

+    /* Restore userspace EBB and other register values */
+    if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+        mtspr(SPRN_EBBHR, ebb_regs[0]);
+        mtspr(SPRN_EBBRR, ebb_regs[1]);
+        mtspr(SPRN_BESCR, ebb_regs[2]);
+        mtspr(SPRN_TAR, user_tar);
+        mtspr(SPRN_FSCR, current->thread.fscr);
+    }
+    mtspr(SPRN_VRSAVE, user_vrsave);
+
 out:
     vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
     atomic_dec(&vcpu->kvm->arch.vcpus_running);


@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
      * Put whatever is in the decrementer into the
      * hypervisor decrementer.
      */
+BEGIN_FTR_SECTION
+    ld      r5, HSTATE_KVM_VCORE(r13)
+    ld      r6, VCORE_KVM(r5)
+    ld      r9, KVM_HOST_LPCR(r6)
+    andis.  r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
     mfspr   r8,SPRN_DEC
     mftb    r7
-    mtspr   SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+    /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+    bne     32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
     extsw   r8,r8
+32: mtspr   SPRN_HDEC,r8
     add     r8,r8,r7
     std     r8,HSTATE_DECEXP(r13)


@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>

+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)            \
+BEGIN_FTR_SECTION;                  \
+    extsw   reg, reg;               \
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE    1
 #define NAPPING_NOVCPU  2

+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS             144
+#define STACK_SLOT_TRAP  (SFS-4)
+#define STACK_SLOT_TID   (SFS-16)
+#define STACK_SLOT_PSSCR (SFS-24)
+#define STACK_SLOT_PID   (SFS-32)
+#define STACK_SLOT_IAMR  (SFS-40)
+#define STACK_SLOT_CIABR (SFS-48)
+#define STACK_SLOT_DAWR  (SFS-56)
+#define STACK_SLOT_DAWRX (SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.

@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
     /* We handle this much like a ceded vcpu */
     /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+    /* HDEC may be larger than DEC for arch >= v3.00, but since the */
+    /* HDEC value came from DEC in the first place, it will fit */
     mfspr   r3, SPRN_HDEC
     mtspr   SPRN_DEC, r3
     /*

@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
     /* See if our timeslice has expired (HDEC is negative) */
     mfspr   r0, SPRN_HDEC
+    EXTEND_HDEC(r0)
     li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-    cmpwi   r0, 0
+    cmpdi   r0, 0
     blt     kvm_novcpu_exit

     /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */

@@ -319,10 +339,10 @@ kvm_novcpu_exit:
     bl      kvmhv_accumulate_time
 #endif
 13: mr      r3, r12
-    stw     r12, 112-4(r1)
+    stw     r12, STACK_SLOT_TRAP(r1)
     bl      kvmhv_commence_exit
     nop
-    lwz     r12, 112-4(r1)
+    lwz     r12, STACK_SLOT_TRAP(r1)
     b       kvmhv_switch_to_host
 /*

@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
     lbz     r4, HSTATE_PTID(r13)
     cmpwi   r4, 0
     bne     63f
-    lis     r6, 0x7fff
-    ori     r6, r6, 0xffff
+    LOAD_REG_ADDR(r6, decrementer_max)
+    ld      r6, 0(r6)
     mtspr   SPRN_HDEC, r6
     /* and set per-LPAR registers, if doing dynamic micro-threading */
     ld      r6, HSTATE_SPLIT_MODE(r13)

@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 *                                                                            *
 *****************************************************************************/

-/* Stack frame offsets */
-#define STACK_SLOT_TID      (112-16)
-#define STACK_SLOT_PSSCR    (112-24)
-#define STACK_SLOT_PID      (112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:

@@ -565,7 +580,7 @@ kvmppc_hv_entry:
      */
     mflr    r0
     std     r0, PPC_LR_STKOFF(r1)
-    stdu    r1, -112(r1)
+    stdu    r1, -SFS(r1)

     /* Save R1 in the PACA */
     std     r1, HSTATE_HOST_R1(r13)

@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
     mfspr   r5, SPRN_TIDR
     mfspr   r6, SPRN_PSSCR
     mfspr   r7, SPRN_PID
+    mfspr   r8, SPRN_IAMR
     std     r5, STACK_SLOT_TID(r1)
     std     r6, STACK_SLOT_PSSCR(r1)
     std     r7, STACK_SLOT_PID(r1)
+    std     r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+    mfspr   r5, SPRN_CIABR
+    mfspr   r6, SPRN_DAWR
+    mfspr   r7, SPRN_DAWRX
+    std     r5, STACK_SLOT_CIABR(r1)
+    std     r6, STACK_SLOT_DAWR(r1)
+    std     r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

 BEGIN_FTR_SECTION
     /* Set partition DABR */

@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
     /* Check if HDEC expires soon */
     mfspr   r3, SPRN_HDEC
-    cmpwi   r3, 512     /* 1 microsecond */
+    EXTEND_HDEC(r3)
+    cmpdi   r3, 512     /* 1 microsecond */
     blt     hdec_soon

 #ifdef CONFIG_KVM_XICS

@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
      * set by the guest could disrupt the host.
      */
     li      r0, 0
-    mtspr   SPRN_IAMR, r0
-    mtspr   SPRN_CIABR, r0
-    mtspr   SPRN_DAWRX, r0
+    mtspr   SPRN_PSPB, r0
     mtspr   SPRN_WORT, r0
 BEGIN_FTR_SECTION
+    mtspr   SPRN_IAMR, r0
     mtspr   SPRN_TCSCR, r0
     /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
     li      r0, 1

@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
     std     r6,VCPU_UAMOR(r9)
     li      r6,0
     mtspr   SPRN_AMR,r6
+    mtspr   SPRN_UAMOR, r6

     /* Switch DSCR back to host value */
     mfspr   r8, SPRN_DSCR

@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
     ptesync

     /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+    ld      r5, STACK_SLOT_CIABR(r1)
+    ld      r6, STACK_SLOT_DAWR(r1)
+    ld      r7, STACK_SLOT_DAWRX(r1)
+    mtspr   SPRN_CIABR, r5
+    mtspr   SPRN_DAWR, r6
+    mtspr   SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
     ld      r5, STACK_SLOT_TID(r1)
     ld      r6, STACK_SLOT_PSSCR(r1)
     ld      r7, STACK_SLOT_PID(r1)
+    ld      r8, STACK_SLOT_IAMR(r1)
     mtspr   SPRN_TIDR, r5
     mtspr   SPRN_PSSCR, r6
     mtspr   SPRN_PID, r7
+    mtspr   SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
     PPC_INVALIDATE_ERAT

@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
     li      r0, KVM_GUEST_MODE_NONE
     stb     r0, HSTATE_IN_GUEST(r13)

-    ld      r0, 112+PPC_LR_STKOFF(r1)
-    addi    r1, r1, 112
+    ld      r0, SFS+PPC_LR_STKOFF(r1)
+    addi    r1, r1, SFS
     mtlr    r0
     blr

@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
     mfspr   r3, SPRN_DEC
     mfspr   r4, SPRN_HDEC
     mftb    r5
-    cmpw    r3, r4
+    extsw   r3, r3
+    EXTEND_HDEC(r4)
+    cmpd    r3, r4
     ble     67f
     mtspr   SPRN_DEC, r4
 67:
     /* save expiry time of guest decrementer */
-    extsw   r3, r3
     add     r3, r3, r5
     ld      r4, HSTATE_KVM_VCPU(r13)
     ld      r5, HSTATE_KVM_VCORE(r13)


@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
                         struct pt_regs *regs_user_copy)
 {
     regs_user->regs = task_pt_regs(current);
-    regs_user->abi = perf_reg_abi(current);
+    regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+                     PERF_SAMPLE_REGS_ABI_NONE;
 }


@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
     return mmio_atsd_reg;
 }

-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
     unsigned long launch;

@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
     /* PID */
     launch |= pid << PPC_BITLSHIFT(38);

+    /* No flush */
+    launch |= !flush << PPC_BITLSHIFT(39);
+
     /* Invalidating the entire process doesn't use a va */
     return mmio_launch_invalidate(npu, launch, 0);
 }

 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-                              unsigned long pid)
+                              unsigned long pid, bool flush)
 {
     unsigned long launch;

@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
     /* PID */
     launch |= pid << PPC_BITLSHIFT(38);

+    /* No flush */
+    launch |= !flush << PPC_BITLSHIFT(39);
+
     return mmio_launch_invalidate(npu, launch, va);
 }

 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

+struct mmio_atsd_reg {
+    struct npu *npu;
+    int reg;
+};
+
+static void mmio_invalidate_wait(
+    struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+    struct npu *npu;
+    int i, reg;
+
+    /* Wait for all invalidations to complete */
+    for (i = 0; i <= max_npu2_index; i++) {
+        if (mmio_atsd_reg[i].reg < 0)
+            continue;
+
+        /* Wait for completion */
+        npu = mmio_atsd_reg[i].npu;
+        reg = mmio_atsd_reg[i].reg;
+        while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+            cpu_relax();
+
+        put_mmio_atsd_reg(npu, reg);
+
+        /*
+         * The GPU requires two flush ATSDs to ensure all entries have
+         * been flushed. We use PID 0 as it will never be used for a
+         * process on the GPU.
+         */
+        if (flush)
+            mmio_invalidate_pid(npu, 0, true);
+    }
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-                            unsigned long address)
+                            unsigned long address, bool flush)
 {
-    int i, j, reg;
+    int i, j;
     struct npu *npu;
     struct pnv_phb *nphb;
     struct pci_dev *npdev;
-    struct {
-        struct npu *npu;
-        int reg;
-    } mmio_atsd_reg[NV_MAX_NPUS];
+    struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
     unsigned long pid = npu_context->mm->context.id;

     /*

@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
         if (va)
             mmio_atsd_reg[i].reg =
-                mmio_invalidate_va(npu, address, pid);
+                mmio_invalidate_va(npu, address, pid,
+                                   flush);
         else
             mmio_atsd_reg[i].reg =
-                mmio_invalidate_pid(npu, pid);
+                mmio_invalidate_pid(npu, pid, flush);

         /*
          * The NPU hardware forwards the shootdown to all GPUs

@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
      */
     flush_tlb_mm(npu_context->mm);

-    /* Wait for all invalidations to complete */
-    for (i = 0; i <= max_npu2_index; i++) {
-        if (mmio_atsd_reg[i].reg < 0)
-            continue;
-
-        /* Wait for completion */
-        npu = mmio_atsd_reg[i].npu;
-        reg = mmio_atsd_reg[i].reg;
-        while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-            cpu_relax();
-
-        put_mmio_atsd_reg(npu, reg);
-    }
+    mmio_invalidate_wait(mmio_atsd_reg, flush);
+    if (flush)
+        /* Wait for the flush to complete */
+        mmio_invalidate_wait(mmio_atsd_reg, false);
 }

 static void pnv_npu2_mn_release(struct mmu_notifier *mn,

@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
      * There should be no more translation requests for this PID, but we
      * need to ensure any entries for it are removed from the TLB.
      */
-    mmio_invalidate(npu_context, 0, 0);
+    mmio_invalidate(npu_context, 0, 0, true);
 }

 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,

@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
     struct npu_context *npu_context = mn_to_npu_context(mn);

-    mmio_invalidate(npu_context, 1, address);
+    mmio_invalidate(npu_context, 1, address, true);
 }

 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,

@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
     struct npu_context *npu_context = mn_to_npu_context(mn);

-    mmio_invalidate(npu_context, 1, address);
+    mmio_invalidate(npu_context, 1, address, true);
 }

 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,

@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
     struct npu_context *npu_context = mn_to_npu_context(mn);
     unsigned long address;

-    for (address = start; address <= end; address += PAGE_SIZE)
-        mmio_invalidate(npu_context, 1, address);
+    for (address = start; address < end; address += PAGE_SIZE)
+        mmio_invalidate(npu_context, 1, address, false);
+
+    /* Do the flush only on the final addess == end */
+    mmio_invalidate(npu_context, 1, address, true);
 }

 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {

@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
         /* No nvlink associated with this GPU device */
         return ERR_PTR(-ENODEV);

-    if (!mm) {
-        /* kernel thread contexts are not supported */
+    if (!mm || mm->context.id == 0) {
+        /*
+         * Kernel thread contexts are not supported and context id 0 is
+         * reserved on the GPU.
+         */
         return ERR_PTR(-EINVAL);
     }


@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
     ptr = asce.origin * 4096;
     if (asce.r) {
         *fake = 1;
+        ptr = 0;
         asce.dt = ASCE_TYPE_REGION1;
     }
     switch (asce.dt) {
     case ASCE_TYPE_REGION1:
-        if (vaddr.rfx01 > asce.tl && !asce.r)
+        if (vaddr.rfx01 > asce.tl && !*fake)
             return PGM_REGION_FIRST_TRANS;
         break;
     case ASCE_TYPE_REGION2:

@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
         union region1_table_entry rfte;

         if (*fake) {
-            /* offset in 16EB guest memory block */
-            ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+            ptr += (unsigned long) vaddr.rfx << 53;
             rfte.val = ptr;
             goto shadow_r2t;
         }

@@ -1036,8 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
         union region2_table_entry rste;

         if (*fake) {
-            /* offset in 8PB guest memory block */
-            ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+            ptr += (unsigned long) vaddr.rsx << 42;
             rste.val = ptr;
             goto shadow_r3t;
         }

@@ -1064,8 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
         union region3_table_entry rtte;

         if (*fake) {
-            /* offset in 4TB guest memory block */
-            ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+            ptr += (unsigned long) vaddr.rtx << 31;
             rtte.val = ptr;
             goto shadow_sgt;
         }

@@ -1101,8 +1099,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
         union segment_table_entry ste;

         if (*fake) {
-            /* offset in 2G guest memory block */
-            ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+            ptr += (unsigned long) vaddr.sx << 20;
             ste.val = ptr;
             goto shadow_pgt;
         }


@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
     bool perm_ok;    /* do not check permissions if true */
     bool ud;         /* inject an #UD if host doesn't support insn */
+    bool tf;         /* TF value before instruction (after for syscall/sysret) */

     bool have_exception;
     struct x86_exception exception;


@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
         ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
     }

+    ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
     return X86EMUL_CONTINUE;
 }


@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
     kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

     ctxt->eflags = kvm_get_rflags(vcpu);
+    ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
     ctxt->eip = kvm_rip_read(vcpu);
     ctxt->mode = (!is_protmode(vcpu))           ? X86EMUL_MODE_REAL :
                  (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :

@@ -5528,22 +5530,12 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
     return dr6;
 }

-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
     struct kvm_run *kvm_run = vcpu->run;

-    /*
-     * rflags is the old, "raw" value of the flags.  The new value has
-     * not been saved yet.
-     *
-     * This is correct even for TF set by the guest, because "the
-     * processor will not generate this exception after the instruction
-     * that sets the TF flag".
-     */
-    if (unlikely(rflags & X86_EFLAGS_TF)) {
     if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-        kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-                                  DR6_RTM;
+        kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
         kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
         kvm_run->debug.arch.exception = DB_VECTOR;
         kvm_run->exit_reason = KVM_EXIT_DEBUG;

@@ -5558,7 +5550,6 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
         vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
         kvm_queue_exception(vcpu, DB_VECTOR);
     }
-    }
 }

 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)

@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
     int r = EMULATE_DONE;

     kvm_x86_ops->skip_emulated_instruction(vcpu);
-    kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+    /*
+     * rflags is the old, "raw" value of the flags.  The new value has
+     * not been saved yet.
+     *
+     * This is correct even for TF set by the guest, because "the
+     * processor will not generate this exception after the instruction
+     * that sets the TF flag".
+     */
+    if (unlikely(rflags & X86_EFLAGS_TF))
+        kvm_vcpu_do_singlestep(vcpu, &r);
     return r == EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);

@@ -5726,8 +5727,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
         toggle_interruptibility(vcpu, ctxt->interruptibility);
         vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
         kvm_rip_write(vcpu, ctxt->eip);
-        if (r == EMULATE_DONE)
-            kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+        if (r == EMULATE_DONE &&
+            (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+            kvm_vcpu_do_singlestep(vcpu, &r);
         if (!ctxt->have_exception ||
             exception_type(ctxt->exception.vector) == EXCPT_TRAP)
             __kvm_set_rflags(vcpu, ctxt->eflags);


@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
         __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }

+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+    if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+        return;
+
+    if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+        struct request_queue *q = hctx->queue;
+
+        if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+            atomic_inc(&q->shared_hctx_restart);
+    } else
+        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+}
+
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+    if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+        return false;
+
+    if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+        struct request_queue *q = hctx->queue;
+
+        if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+            atomic_dec(&q->shared_hctx_restart);
+    } else
+        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+    if (blk_mq_hctx_has_pending(hctx)) {
+        blk_mq_run_hw_queue(hctx, true);
+        return true;
+    }
+
+    return false;
+}
+
 struct request *blk_mq_sched_get_request(struct request_queue *q,
                                          struct bio *bio,
                                          unsigned int op,

@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
     return true;
 }

-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-    if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-        if (blk_mq_hctx_has_pending(hctx)) {
-            blk_mq_run_hw_queue(hctx, true);
-            return true;
-        }
-    }
-    return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos: loop cursor.

@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
     unsigned int i, j;

     if (set->flags & BLK_MQ_F_TAG_SHARED) {
+        /*
+         * If this is 0, then we know that no hardware queues
+         * have RESTART marked. We're done.
+         */
+        if (!atomic_read(&queue->shared_hctx_restart))
+            return;
+
         rcu_read_lock();
         list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
                                    tag_set_list) {


@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
     return false;
 }

-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-    if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
     return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);


@@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
     }
 }

+/*
+ * Caller needs to ensure that we're either frozen/quiesced, or that
+ * the queue isn't live yet.
+ */
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
     struct blk_mq_hw_ctx *hctx;
     int i;

     queue_for_each_hw_ctx(q, hctx, i) {
-        if (shared)
+        if (shared) {
+            if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                atomic_inc(&q->shared_hctx_restart);
             hctx->flags |= BLK_MQ_F_TAG_SHARED;
-        else
+        } else {
+            if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                atomic_dec(&q->shared_hctx_restart);
             hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+        }
     }
 }

-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+                                        bool shared)
 {
     struct request_queue *q;


@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
     adev->flags.coherent_dma = cca;
 }

+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+    bool *is_spi_i2c_slave_p = data;
+
+    if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+        return 1;
+
+    /*
+     * devices that are connected to UART still need to be enumerated to
+     * platform bus
+     */
+    if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+        *is_spi_i2c_slave_p = true;
+
+    /* no need to do more checking */
+    return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+    struct list_head resource_list;
+    bool is_spi_i2c_slave = false;
+
+    INIT_LIST_HEAD(&resource_list);
+    acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+                           &is_spi_i2c_slave);
+    acpi_dev_free_resource_list(&resource_list);
+
+    return is_spi_i2c_slave;
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
                              int type, unsigned long long sta)
 {

@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
     acpi_bus_get_flags(device);
     device->flags.match_driver = false;
     device->flags.initialized = true;
+    device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
     acpi_device_clear_enumerated(device);
     device_initialize(&device->dev);
     dev_set_uevent_suppress(&device->dev, true);

@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
     return AE_OK;
 }

-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-    bool *is_spi_i2c_slave_p = data;
-
-    if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-        return 1;
-
-    /*
-     * devices that are connected to UART still need to be enumerated to
-     * platform bus
-     */
-    if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-        *is_spi_i2c_slave_p = true;
-
-    /* no need to do more checking */
-    return -1;
-}
-
 static void acpi_default_enumeration(struct acpi_device *device)
 {
-    struct list_head resource_list;
-    bool is_spi_i2c_slave = false;
-
     /*
      * Do not enumerate SPI/I2C slaves as they will be enumerated by their
      * respective parents.
      */
-    INIT_LIST_HEAD(&resource_list);
-    acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-                           &is_spi_i2c_slave);
-    acpi_dev_free_resource_list(&resource_list);
-    if (!is_spi_i2c_slave) {
+    if (!device->flags.spi_i2c_slave) {
         acpi_create_platform_device(device, NULL);
         acpi_device_set_enumerated(device);
     } else {

@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
         return;

     device->flags.match_driver = true;
-    if (ret > 0) {
+    if (ret > 0 && !device->flags.spi_i2c_slave) {
         acpi_device_set_enumerated(device);
         goto ok;
     }

@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
     if (ret < 0)
         return;

-    if (device->pnp.type.platform_id)
-        acpi_default_enumeration(device);
-    else
+    if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
         acpi_device_set_enumerated(device);
+    else
+        acpi_default_enumeration(device);

 ok:
     list_for_each_entry(child, &device->children, node)


@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
     unsigned long timeout;
     int ret;

-    xen_blkif_get(blkif);
-
     set_freezable();
     while (!kthread_should_stop()) {
         if (try_to_freeze())

@@ -665,7 +663,6 @@ int xen_blkif_schedule(void *arg)
         print_stats(ring);

     ring->xenblkd = NULL;
-    xen_blkif_put(blkif);

     return 0;
 }

@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 static void make_response(struct xen_blkif_ring *ring, u64 id,
                           unsigned short op, int st)
 {
-    struct blkif_response resp;
+    struct blkif_response *resp;
     unsigned long flags;
     union blkif_back_rings *blk_rings;
     int notify;

-    resp.id = id;
-    resp.operation = op;
-    resp.status = st;
-
     spin_lock_irqsave(&ring->blk_ring_lock, flags);
     blk_rings = &ring->blk_rings;
     /* Place on the response ring for the relevant domain. */
     switch (ring->blkif->blk_protocol) {
     case BLKIF_PROTOCOL_NATIVE:
-        memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-               &resp, sizeof(resp));
+        resp = RING_GET_RESPONSE(&blk_rings->native,
+                                 blk_rings->native.rsp_prod_pvt);
         break;
     case BLKIF_PROTOCOL_X86_32:
-        memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-               &resp, sizeof(resp));
+        resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+                                 blk_rings->x86_32.rsp_prod_pvt);
         break;
     case BLKIF_PROTOCOL_X86_64:
-        memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-               &resp, sizeof(resp));
+        resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+                                 blk_rings->x86_64.rsp_prod_pvt);
         break;
     default:
         BUG();
     }
+
+    resp->id = id;
+    resp->operation = op;
+    resp->status = st;
+
     blk_rings->common.rsp_prod_pvt++;
     RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
     spin_unlock_irqrestore(&ring->blk_ring_lock, flags);


@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
 struct blkif_common_request {
     char dummy;
 };
-struct blkif_common_response {
-    char dummy;
-};
+
+/* i386 protocol version */

 struct blkif_x86_32_request_rw {
     uint8_t nr_segments;    /* number of segments */

@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
     } u;
 } __attribute__((__packed__));

-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
-    uint64_t id;         /* copied from request */
-    uint8_t  operation;  /* copied from request */
-    int16_t  status;     /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)

 /* x86_64 protocol version */

 struct blkif_x86_64_request_rw {

@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
     } u;
 } __attribute__((__packed__));

-struct blkif_x86_64_response {
-    uint64_t __attribute__((__aligned__(8))) id;
-    uint8_t  operation;  /* copied from request */
-    int16_t  status;     /* BLKIF_RSP_??? */
-};
-
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-                  struct blkif_common_response);
+                  struct blkif_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-                  struct blkif_x86_32_response);
+                  struct blkif_response __packed);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-                  struct blkif_x86_64_response);
+                  struct blkif_response);

 union blkif_back_rings {
     struct blkif_back_ring native;

@@ -281,6 +266,7 @@ struct xen_blkif_ring {
     wait_queue_head_t wq;
     atomic_t inflight;
+    bool active;
     /* One thread per blkif ring. */
     struct task_struct *xenblkd;
     unsigned int waiting_reqs;


@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
         init_waitqueue_head(&ring->shutdown_wq);
         ring->blkif = blkif;
         ring->st_print = jiffies;
-        xen_blkif_get(blkif);
+        ring->active = true;
     }

     return 0;

@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
         struct xen_blkif_ring *ring = &blkif->rings[r];
         unsigned int i = 0;

+        if (!ring->active)
+            continue;
+
         if (ring->xenblkd) {
             kthread_stop(ring->xenblkd);
             wake_up(&ring->shutdown_wq);
+            ring->xenblkd = NULL;
         }

         /* The above kthread_stop() guarantees that at this point we

@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
         BUG_ON(ring->free_pages_num != 0);
         BUG_ON(ring->persistent_gnt_c != 0);
         WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-        xen_blkif_put(blkif);
+        ring->active = false;
     }
     blkif->nr_ring_pages = 0;
     /*

@@ -312,9 +314,10 @@
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-    xen_blkif_disconnect(blkif);
+    WARN_ON(xen_blkif_disconnect(blkif));
     xen_vbd_free(&blkif->vbd);
+    kfree(blkif->be->mode);
+    kfree(blkif->be);

     /* Make sure everything is drained before shutting down */
     kmem_cache_free(xen_blkif_cachep, blkif);

@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
         xen_blkif_put(be->blkif);
     }

-    kfree(be->mode);
-    kfree(be);
     return 0;
 }


@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
         p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
         cp++; crng_init_cnt++; len--;
     }
-    spin_unlock_irqrestore(&primary_crng.lock, flags);
     if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
         invalidate_batched_entropy();
         crng_init = 1;
         wake_up_interruptible(&crng_init_wait);
         pr_notice("random: fast init done\n");
     }
+    spin_unlock_irqrestore(&primary_crng.lock, flags);
     return 1;
 }

@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
     }
     memzero_explicit(&buf, sizeof(buf));
     crng->init_time = jiffies;
+    spin_unlock_irqrestore(&primary_crng.lock, flags);
     if (crng == &primary_crng && crng_init < 2) {
         invalidate_batched_entropy();
         crng_init = 2;

@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
         wake_up_interruptible(&crng_init_wait);
         pr_notice("random: crng init done\n");
     }
-    spin_unlock_irqrestore(&primary_crng.lock, flags);
 }

 static inline void crng_wait_ready(void)

@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
     u64 ret;
-    bool use_lock = crng_init < 2;
-    unsigned long flags;
+    bool use_lock = READ_ONCE(crng_init) < 2;
+    unsigned long flags = 0;
     struct batched_entropy *batch;

 #if BITS_PER_LONG == 64

@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
     u32 ret;
-    bool use_lock = crng_init < 2;
-    unsigned long flags;
+    bool use_lock = READ_ONCE(crng_init) < 2;
+    unsigned long flags = 0;
     struct batched_entropy *batch;

     if (arch_get_random_int(&ret))
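The get_random_u32()/get_random_u64() hunks above are an instance of the "snapshot a racy flag once" pattern: crng_init may change concurrently, so the lock decision is latched into use_lock with READ_ONCE() and that single snapshot governs both the lock and the unlock; re-reading the flag could pair a taken lock with a skipped unlock. A minimal user-space sketch of the same pattern (hypothetical names; a volatile read stands in for the kernel's READ_ONCE()):

#include <pthread.h>
#include <stdbool.h>

static int flag;    /* updated concurrently by another thread */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void do_work(void)
{
    /* Read the racy flag exactly once; testing "flag < 2" twice
     * could observe two different values and unbalance the lock. */
    bool use_lock = *(volatile int *)&flag < 2;

    if (use_lock)
        pthread_mutex_lock(&lock);

    /* ... work that needs the lock only while flag < 2 ... */

    if (use_lock)
        pthread_mutex_unlock(&lock);
}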


@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
     u32 set;

     if (!of_device_is_compatible(mvchip->chip.of_node,
-                                 "marvell,armada-370-xp-gpio"))
+                                 "marvell,armada-370-gpio"))
         return 0;

     if (IS_ERR(mvchip->clk))

@@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
         .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
     },
     {
-        .compatible = "marvell,armada-370-xp-gpio",
+        .compatible = "marvell,armada-370-gpio",
         .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
     },
     {

@@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                                  mvchip);
     }

-    /* Armada 370/XP has simple PWM support for GPIO lines */
+    /* Some MVEBU SoCs have simple PWM support for GPIO lines */
     if (IS_ENABLED(CONFIG_PWM))
         return mvebu_pwm_probe(pdev, mvchip, id);


@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
             DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
                      adev->clock.default_dispclk / 100);
             adev->clock.default_dispclk = 60000;
+        } else if (adev->clock.default_dispclk <= 60000) {
+            DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+                     adev->clock.default_dispclk / 100);
+            adev->clock.default_dispclk = 62500;
         }
         adev->clock.dp_extclk =
             le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);


@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
     {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
     {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
     {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+    {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
     {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
     /* Vega 10 */
     {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},


@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
     struct drm_device *dev = crtc->dev;
     struct amdgpu_device *adev = dev->dev_private;
     int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-    ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+    ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;

     memset(&args, 0, sizeof(args));

@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
 {
     int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-    ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+    ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;

     memset(&args, 0, sizeof(args));

View File

@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector) if (!connector)
return -ENOENT; return -ENOENT;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
if (ret)
goto out_unref;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
if (connector->encoder_ids[i] != 0) if (connector->encoder_ids[i] != 0)
encoders_count++; encoders_count++;
@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (put_user(connector->encoder_ids[i], if (put_user(connector->encoder_ids[i],
encoder_ptr + copied)) { encoder_ptr + copied)) {
ret = -EFAULT; ret = -EFAULT;
goto out_unref; goto out;
} }
copied++; copied++;
} }
@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (copy_to_user(mode_ptr + copied, if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) { &u_mode, sizeof(u_mode))) {
ret = -EFAULT; ret = -EFAULT;
mutex_unlock(&dev->mode_config.mutex);
goto out; goto out;
} }
copied++; copied++;
} }
} }
out_resp->count_modes = mode_count; out_resp->count_modes = mode_count;
out:
mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&dev->mode_config.mutex);
out_unref:
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
else
out_resp->encoder_id = 0;
/* Only grab properties after probing, to make sure EDID and other
* properties reflect the latest status. */
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
out:
drm_connector_put(connector); drm_connector_put(connector);
return ret; return ret;
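The reordering above is the substance of this hunk: probe first under the mode_config mutex, then take the connection lock only to snapshot encoder and property state, so EDID-derived properties reflect the probe that just ran. A hedged sketch of that ordering, with hypothetical types and stubbed helpers:

    #include <pthread.h>

    struct conn_sk {
            pthread_mutex_t config_lock;     /* guards probing */
            pthread_mutex_t connection_lock; /* guards derived state */
    };

    static int fill_modes_sk(struct conn_sk *c)     { (void)c; return 0; }
    static int snapshot_props_sk(struct conn_sk *c) { (void)c; return 0; }

    int getconnector_sketch(struct conn_sk *c)
    {
            int ret;

            pthread_mutex_lock(&c->config_lock);
            ret = fill_modes_sk(c);          /* may refresh EDID state */
            pthread_mutex_unlock(&c->config_lock);
            if (ret)
                    return ret;

            pthread_mutex_lock(&c->connection_lock);
            ret = snapshot_props_sk(c);      /* sees the fresh probe */
            pthread_mutex_unlock(&c->connection_lock);
            return ret;
    }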

View File

@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct page *page; struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */ unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment; unsigned int max_segment;
gfp_t noreclaim;
int ret; int ret;
gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it /* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in * wasn't in the GTT, there shouldn't be any way it could have been in
@ -2315,22 +2315,31 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* Fail silently without starting the shrinker * Fail silently without starting the shrinker
*/ */
mapping = obj->base.filp->f_mapping; mapping = obj->base.filp->f_mapping;
gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); noreclaim = mapping_gfp_constraint(mapping,
gfp |= __GFP_NORETRY | __GFP_NOWARN; ~(__GFP_IO | __GFP_RECLAIM));
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl; sg = st->sgl;
st->nents = 0; st->nents = 0;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
0,
}, *s = shrink;
gfp_t gfp = noreclaim;
do {
page = shmem_read_mapping_page_gfp(mapping, i, gfp); page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (unlikely(IS_ERR(page))) { if (likely(!IS_ERR(page)))
i915_gem_shrink(dev_priv, break;
page_count,
I915_SHRINK_BOUND | if (!*s) {
I915_SHRINK_UNBOUND | ret = PTR_ERR(page);
I915_SHRINK_PURGEABLE); goto err_sg;
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
} }
if (unlikely(IS_ERR(page))) {
gfp_t reclaim; i915_gem_shrink(dev_priv, 2 * page_count, *s++);
cond_resched();
/* We've tried hard to allocate the memory by reaping /* We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and * our own buffer, now let the real VM do its job and
@ -2340,15 +2349,26 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* defer the oom here by reporting the ENOMEM back * defer the oom here by reporting the ENOMEM back
* to userspace. * to userspace.
*/ */
reclaim = mapping_gfp_mask(mapping); if (!*s) {
reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ /* reclaim and warn, but no oom */
gfp = mapping_gfp_mask(mapping);
page = shmem_read_mapping_page_gfp(mapping, i, reclaim); /* Our bo are always dirty and so we require
if (IS_ERR(page)) { * kswapd to reclaim our pages (direct reclaim
ret = PTR_ERR(page); * does not effectively begin pageout of our
goto err_sg; * buffers on its own). However, direct reclaim
} * only waits for kswapd when under allocation
* congestion. So as a result __GFP_RECLAIM is
* unreliable and fails to actually reclaim our
* dirty pages -- unless you try over and over
* again with !__GFP_NORETRY. However, we still
* want to fail this allocation rather than
* trigger the out-of-memory killer and for
* this we want the future __GFP_MAYFAIL.
*/
} }
} while (1);
if (!i || if (!i ||
sg->length >= max_segment || sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) { page_to_pfn(page) != last_pfn + 1) {
@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
mapping = obj->base.filp->f_mapping; mapping = obj->base.filp->f_mapping;
mapping_set_gfp_mask(mapping, mask); mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
i915_gem_object_init(obj, &i915_gem_object_ops); i915_gem_object_init(obj, &i915_gem_object_ops);
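The retry loop introduced above walks a table of progressively more aggressive reclaim passes and only reports failure after the last stage, instead of letting a hard allocation trigger the OOM killer. A userspace analogue of that shape, stages hypothetical:

    #include <stdlib.h>

    static void reclaim_pass_sk(int stage) { (void)stage; } /* stub: shrink */

    void *alloc_with_escalation(size_t size)
    {
            static const int stages[] = { 1, 2, 0 }; /* 0 terminates */
            const int *s = stages;
            void *p;

            for (;;) {
                    p = malloc(size);        /* stand-in for the gfp alloc */
                    if (p)
                            return p;
                    if (!*s)
                            return NULL;     /* fail instead of OOM-killing */
                    reclaim_pass_sk(*s++);   /* escalate, then retry */
            }
    }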

View File

@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* GPU processing the request, we never over-estimate the * GPU processing the request, we never over-estimate the
* position of the head. * position of the head.
*/ */
req->head = req->ring->tail; req->head = req->ring->emit;
/* Check that we didn't interrupt ourselves with a new request */ /* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);

View File

@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
GEM_BUG_ON(freespace < wqi_size); GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */ /* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->tail; tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
assert_ring_tail_valid(rq->ring, rq->tail);
tail >>= 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we

View File

@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
static void skylake_pfit_enable(struct intel_crtc *crtc); static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc); static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev); static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
struct intel_limit { struct intel_limit {
@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
struct drm_crtc *crtc; struct drm_crtc *crtc;
int i, ret; int i, ret;
intel_modeset_setup_hw_state(dev); intel_modeset_setup_hw_state(dev, ctx);
i915_redisable_vga(to_i915(dev)); i915_redisable_vga(to_i915(dev));
if (!state) if (!state)
@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(intel_crtc); intel_update_watermarks(intel_crtc);
} }
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{ {
struct intel_encoder *encoder; struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
return; return;
} }
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */ /* Everything's already locked, -EDEADLK can't happen. */
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev_priv); intel_setup_outputs(dev_priv);
drm_modeset_lock_all(dev); drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev); intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
drm_modeset_unlock_all(dev); drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) { for_each_intel_crtc(dev, crtc) {
@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
return 0; return 0;
} }
static void intel_enable_pipe_a(struct drm_device *dev) static void intel_enable_pipe_a(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{ {
struct intel_connector *connector; struct intel_connector *connector;
struct drm_connector_list_iter conn_iter; struct drm_connector_list_iter conn_iter;
struct drm_connector *crt = NULL; struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp; struct intel_load_detect_pipe load_detect_temp;
struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
int ret; int ret;
/* We can't just switch on the pipe A, we need to set things up with a /* We can't just switch on the pipe A, we need to set things up with a
@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
} }
static void intel_sanitize_crtc(struct intel_crtc *crtc) static void intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
plane = crtc->plane; plane = crtc->plane;
crtc->base.primary->state->visible = true; crtc->base.primary->state->visible = true;
crtc->plane = !plane; crtc->plane = !plane;
intel_crtc_disable_noatomic(&crtc->base); intel_crtc_disable_noatomic(&crtc->base, ctx);
crtc->plane = plane; crtc->plane = plane;
} }
@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* resume. Force-enable the pipe to fix this; in the update_dpms * resume. Force-enable the pipe to fix this; in the update_dpms
* call below we restore the pipe to the right state, but leave * call below we restore the pipe to the right state, but leave
* the required bits on. */ * the required bits on. */
intel_enable_pipe_a(dev); intel_enable_pipe_a(dev, ctx);
} }
/* Adjust the state of the output pipe according to whether we /* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */ * have active connectors/encoders. */
if (crtc->active && !intel_crtc_has_encoders(crtc)) if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base); intel_crtc_disable_noatomic(&crtc->base, ctx);
if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/* /*
@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
* and sanitizes it to the current state * and sanitizes it to the current state
*/ */
static void static void
intel_modeset_setup_hw_state(struct drm_device *dev) intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{ {
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe; enum pipe pipe;
@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe); crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_sanitize_crtc(crtc); intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc, crtc->config, intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]"); "[setup_hw_state]");
} }

View File

@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
struct intel_panel *panel = &connector->panel; struct intel_panel *panel = &connector->panel;
intel_dp_aux_enable_backlight(connector);
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
panel->backlight.max = 0xFFFF; panel->backlight.max = 0xFFFF;
else else

View File

@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state; u32 *reg_state = ce->lrc_reg_state;
assert_ring_tail_valid(rq->ring, rq->tail); reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP /* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page. * registers and point the unallocated PDPs to scratch page.
@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
ce->state->obj->mm.dirty = true; ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj); i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = ce->ring->tail = 0; intel_ring_reset(ce->ring, 0);
intel_ring_update_space(ce->ring);
} }
} }
} }

View File

@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
void intel_ring_update_space(struct intel_ring *ring) void intel_ring_update_space(struct intel_ring *ring)
{ {
ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
} }
static int static int
@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request); i915_gem_request_submit(request);
assert_ring_tail_valid(request->ring, request->tail); I915_WRITE_TAIL(request->engine,
I915_WRITE_TAIL(request->engine, request->tail); intel_ring_set_tail(request->ring, request->tail));
} }
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@ -1316,11 +1316,23 @@ int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
return PTR_ERR(addr); return PTR_ERR(addr);
} }
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
GEM_BUG_ON(!list_empty(&ring->request_list));
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
intel_ring_update_space(ring);
}
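intel_ring_reset() moves head, tail and emit together because free space is derived from all of them; resetting one in isolation would leave a stale space figure. A simplified ring sketch of that invariant (power-of-two size assumed, names hypothetical):

    struct ring_sk { unsigned head, tail, emit, size, space; };

    static unsigned ring_space_sk(unsigned head, unsigned emit, unsigned size)
    {
            return (head - emit - 1) & (size - 1); /* size is a power of two */
    }

    void ring_reset_sketch(struct ring_sk *r, unsigned tail)
    {
            r->head = r->tail = r->emit = tail;    /* keep all three coherent */
            r->space = ring_space_sk(r->head, r->emit, r->size);
    }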
void intel_ring_unpin(struct intel_ring *ring) void intel_ring_unpin(struct intel_ring *ring)
{ {
GEM_BUG_ON(!ring->vma); GEM_BUG_ON(!ring->vma);
GEM_BUG_ON(!ring->vaddr); GEM_BUG_ON(!ring->vaddr);
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
if (i915_vma_is_map_and_fenceable(ring->vma)) if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma); i915_vma_unpin_iomap(ring->vma);
else else
@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
/* Restart from the beginning of the rings for convenience */
for_each_engine(engine, dev_priv, id) for_each_engine(engine, dev_priv, id)
engine->buffer->head = engine->buffer->tail; intel_ring_reset(engine->buffer, 0);
} }
static int ring_request_alloc(struct drm_i915_gem_request *request) static int ring_request_alloc(struct drm_i915_gem_request *request)
@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
unsigned space; unsigned space;
/* Would completion of this request free enough space? */ /* Would completion of this request free enough space? */
space = __intel_ring_space(target->postfix, ring->tail, space = __intel_ring_space(target->postfix, ring->emit,
ring->size); ring->size);
if (space >= bytes) if (space >= bytes)
break; break;
@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{ {
struct intel_ring *ring = req->ring; struct intel_ring *ring = req->ring;
int remain_actual = ring->size - ring->tail; int remain_actual = ring->size - ring->emit;
int remain_usable = ring->effective_size - ring->tail; int remain_usable = ring->effective_size - ring->emit;
int bytes = num_dwords * sizeof(u32); int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes; int total_bytes, wait_bytes;
bool need_wrap = false; bool need_wrap = false;
@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
if (unlikely(need_wrap)) { if (unlikely(need_wrap)) {
GEM_BUG_ON(remain_actual > ring->space); GEM_BUG_ON(remain_actual > ring->space);
GEM_BUG_ON(ring->tail + remain_actual > ring->size); GEM_BUG_ON(ring->emit + remain_actual > ring->size);
/* Fill the tail with MI_NOOP */ /* Fill the tail with MI_NOOP */
memset(ring->vaddr + ring->tail, 0, remain_actual); memset(ring->vaddr + ring->emit, 0, remain_actual);
ring->tail = 0; ring->emit = 0;
ring->space -= remain_actual; ring->space -= remain_actual;
} }
GEM_BUG_ON(ring->tail > ring->size - bytes); GEM_BUG_ON(ring->emit > ring->size - bytes);
cs = ring->vaddr + ring->tail; cs = ring->vaddr + ring->emit;
ring->tail += bytes; ring->emit += bytes;
ring->space -= bytes; ring->space -= bytes;
GEM_BUG_ON(ring->space < 0); GEM_BUG_ON(ring->space < 0);
@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int intel_ring_cacheline_align(struct drm_i915_gem_request *req) int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{ {
int num_dwords = int num_dwords =
(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
u32 *cs; u32 *cs;
if (num_dwords == 0) if (num_dwords == 0)

View File

@ -145,6 +145,7 @@ struct intel_ring {
u32 head; u32 head;
u32 tail; u32 tail;
u32 emit;
int space; int space;
int size; int size;
@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
struct intel_ring * struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size); intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
void intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring); void intel_ring_free(struct intel_ring *ring);
@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
* reserved for the command packet (i.e. the value passed to * reserved for the command packet (i.e. the value passed to
* intel_ring_begin()). * intel_ring_begin()).
*/ */
GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
} }
static inline u32 static inline u32
@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
GEM_BUG_ON(tail >= ring->size); GEM_BUG_ON(tail >= ring->size);
} }
void intel_ring_update_space(struct intel_ring *ring); static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
/* Whilst writes to the tail are strictly ordered, there is no
* serialisation between readers and the writers. The tail may be
* read by i915_gem_request_retire() just as it is being updated
* by execlists, as although the breadcrumb is complete, the context
* switch hasn't been seen.
*/
assert_ring_tail_valid(ring, tail);
ring->tail = tail;
return tail;
}
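With the tail/emit split, each index gets a single writer: emit advances as commands are written, tail only at submission, which is what intel_ring_set_tail() centralises. A hypothetical submission path then reduces to:

    struct tail_ring_sk { unsigned int tail; };

    static unsigned int set_tail_sketch(struct tail_ring_sk *r, unsigned int t)
    {
            r->tail = t;            /* single store; readers race benignly */
            return t;
    }

    void submit_sketch(struct tail_ring_sk *r, unsigned int tail,
                       void (*write_tail_reg)(unsigned int))
    {
            write_tail_reg(set_tail_sketch(r, tail)); /* publish, then kick hw */
    }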
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

View File

@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x280a) rdev->pdev->subsystem_device == 0x280a)
return; return;
/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
* - it hangs on resume inside the dynclk 1 table.
*/
if (rdev->family == CHIP_RS400 &&
rdev->pdev->subsystem_vendor == 0x1179 &&
rdev->pdev->subsystem_device == 0xff31)
return;
/* DYN CLK 1 */ /* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);

View File

@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=51381 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/ */
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
* https://bugs.freedesktop.org/show_bug.cgi?id=101491
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */ /* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 },

View File

@ -319,6 +319,9 @@
#define USB_VENDOR_ID_DELCOM 0x0fc5 #define USB_VENDOR_ID_DELCOM 0x0fc5
#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080
#define USB_VENDOR_ID_DELL 0x413c
#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
#define USB_VENDOR_ID_DELORME 0x1163 #define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200

View File

@ -349,7 +349,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
magicmouse_emit_buttons(msc, clicks & 3); magicmouse_emit_buttons(msc, clicks & 3);
input_mt_report_pointer_emulation(input, true);
input_report_rel(input, REL_X, x); input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y); input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@ -389,9 +388,6 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__clear_bit(BTN_RIGHT, input->keybit); __clear_bit(BTN_RIGHT, input->keybit);
__clear_bit(BTN_MIDDLE, input->keybit); __clear_bit(BTN_MIDDLE, input->keybit);
__set_bit(BTN_MOUSE, input->keybit); __set_bit(BTN_MOUSE, input->keybit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
__set_bit(BTN_TOOL_FINGER, input->keybit); __set_bit(BTN_TOOL_FINGER, input->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, input->keybit); __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
@ -399,6 +395,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(BTN_TOOL_QUINTTAP, input->keybit); __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
__set_bit(BTN_TOUCH, input->keybit); __set_bit(BTN_TOUCH, input->keybit);
__set_bit(INPUT_PROP_POINTER, input->propbit); __set_bit(INPUT_PROP_POINTER, input->propbit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
__set_bit(EV_ABS, input->evbit); __set_bit(EV_ABS, input->evbit);

View File

@ -85,6 +85,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },

View File

@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
* the first read operation, otherwise the first read cost * the first read operation, otherwise the first read cost
* one extra clock cycle. * one extra clock cycle.
*/ */
temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_MTX; temp |= I2CR_MTX;
writeb(temp, i2c_imx->base + IMX_I2C_I2CR); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
} }
msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
* the first read operation, otherwise the first read cost * the first read operation, otherwise the first read cost
* one extra clock cycle. * one extra clock cycle.
*/ */
temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_MTX; temp |= I2CR_MTX;
writeb(temp, i2c_imx->base + IMX_I2C_I2CR); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
} }
} else if (i == (msgs->len - 2)) { } else if (i == (msgs->len - 2)) {
dev_dbg(&i2c_imx->adapter.dev, dev_dbg(&i2c_imx->adapter.dev,

View File

@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic)
static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{ {
struct bio *bio; struct bio *bio;
spin_lock_irq(&ic->endio_wait.lock); unsigned long flags;
spin_lock_irqsave(&ic->endio_wait.lock, flags);
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
bio_list_add(&ic->flush_bio_list, bio); bio_list_add(&ic->flush_bio_list, bio);
spin_unlock_irq(&ic->endio_wait.lock); spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
queue_work(ic->commit_wq, &ic->commit_work); queue_work(ic->commit_wq, &ic->commit_work);
} }
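The switch to spin_lock_irqsave() above follows the usual rule: a helper reachable with interrupts either on or off must save and restore the caller's interrupt state rather than force-enable on unlock. A sketch with stubbed arch hooks, names hypothetical:

    static unsigned long irq_save_sk(void)          { return 1; } /* stub */
    static void irq_restore_sk(unsigned long flags) { (void)flags; }

    void queue_under_lock_sketch(void (*enqueue)(void))
    {
            unsigned long flags = irq_save_sk();  /* capture caller state */
            enqueue();                            /* critical section */
            irq_restore_sk(flags);                /* restore, never enable */
    }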
@ -3040,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "The device is too small"; ti->error = "The device is too small";
goto bad; goto bad;
} }
if (ti->len > ic->provided_data_sectors) {
r = -EINVAL;
ti->error = "Not enough provided sectors for requested mapping size";
goto bad;
}
if (!buffer_sectors) if (!buffer_sectors)
buffer_sectors = 1; buffer_sectors = 1;

View File

@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region,
else if (op == REQ_OP_WRITE_SAME) else if (op == REQ_OP_WRITE_SAME)
special_cmd_max_sectors = q->limits.max_write_same_sectors; special_cmd_max_sectors = q->limits.max_write_same_sectors;
if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
op == REQ_OP_WRITE_SAME) && op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
special_cmd_max_sectors == 0) { atomic_inc(&io->count);
dec_count(io, region, -EOPNOTSUPP); dec_count(io, region, -EOPNOTSUPP);
return; return;
} }
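The added atomic_inc() pairs with the decrement hidden inside dec_count(): the completion helper consumes one reference, so the early-error path must take one first or the count goes negative. A standalone sketch of that contract:

    #include <errno.h>
    #include <stdatomic.h>

    struct io_sk { atomic_int count; int error; };

    static void complete_sk(struct io_sk *io) { (void)io; } /* stub */

    void dec_count_sketch(struct io_sk *io, int error)
    {
            if (error)
                    io->error = error;
            if (atomic_fetch_sub(&io->count, 1) == 1)
                    complete_sk(io);              /* last ref completes */
    }

    void unsupported_op_sketch(struct io_sk *io)
    {
            atomic_fetch_add(&io->count, 1);      /* pair with dec below */
            dec_count_sketch(io, -EOPNOTSUPP);
    }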

View File

@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record { struct dm_raid1_bio_record {
struct mirror *m; struct mirror *m;
/* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details; struct dm_bio_details details;
region_t write_region; region_t write_region;
}; };
@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record = struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
bio_record->details.bi_bdev = NULL;
if (rw == WRITE) { if (rw == WRITE) {
/* Save region for mirror_end_io() handler */ /* Save region for mirror_end_io() handler */
bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
} }
if (error == -EOPNOTSUPP) if (error == -EOPNOTSUPP)
return error; goto out;
if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
return error; goto out;
if (unlikely(error)) { if (unlikely(error)) {
if (!bio_record->details.bi_bdev) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
* mirror in-sync.
*/
DMERR_LIMIT("Mirror read failed.");
return -EIO;
}
m = bio_record->m; m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.", DMERR("Mirror read failed from %s. Trying alternative device.",
@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
bd = &bio_record->details; bd = &bio_record->details;
dm_bio_restore(bd, bio); dm_bio_restore(bd, bio);
bio_record->details.bi_bdev = NULL;
bio->bi_error = 0; bio->bi_error = 0;
queue_bio(ms, bio, rw); queue_bio(ms, bio, rw);
@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
DMERR("All replicated volumes dead, failing I/O"); DMERR("All replicated volumes dead, failing I/O");
} }
out:
bio_record->details.bi_bdev = NULL;
return error; return error;
} }
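The NULL bi_bdev convention above is a sentinel pattern: one pointer field doubles as a "details were saved" flag, set before use and cleared on every exit so a later retry never restores stale state. A minimal sketch:

    #include <errno.h>
    #include <stddef.h>

    struct record_sk { void *saved; /* NULL means nothing recorded */ };

    int end_io_sketch(struct record_sk *rec, int error)
    {
            if (!error)
                    goto out;
            if (!rec->saved)
                    return -EIO;      /* nothing recorded, cannot retry */
            /* ... restore from rec->saved and requeue the request ... */
    out:
            rec->saved = NULL;        /* never reuse stale details */
            return error;
    }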

View File

@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona,
int ret; int ret;
ret = regmap_read_poll_timeout(arizona->regmap, ret = regmap_read_poll_timeout(arizona->regmap,
ARIZONA_INTERRUPT_RAW_STATUS_5, val, reg, val, ((val & mask) == target),
((val & mask) == target),
ARIZONA_REG_POLL_DELAY_US, ARIZONA_REG_POLL_DELAY_US,
timeout_ms * 1000); timeout_ms * 1000);
if (ret) if (ret)

View File

@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap)
{ {
int err; int err;
mutex_lock(&uld_mutex);
err = setup_sge_queues(adap); err = setup_sge_queues(adap);
if (err) if (err)
goto out; goto rel_lock;
err = setup_rss(adap); err = setup_rss(adap);
if (err) if (err)
goto freeq; goto freeq;
@ -2197,7 +2198,6 @@ static int cxgb_up(struct adapter *adap)
goto irq_err; goto irq_err;
} }
mutex_lock(&uld_mutex);
enable_rx(adap); enable_rx(adap);
t4_sge_start(adap); t4_sge_start(adap);
t4_intr_enable(adap); t4_intr_enable(adap);
@ -2210,13 +2210,15 @@ static int cxgb_up(struct adapter *adap)
#endif #endif
/* Initialize hash mac addr list*/ /* Initialize hash mac addr list*/
INIT_LIST_HEAD(&adap->mac_hlist); INIT_LIST_HEAD(&adap->mac_hlist);
out:
return err; return err;
irq_err: irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq: freeq:
t4_free_sge_resources(adap); t4_free_sge_resources(adap);
goto out; rel_lock:
mutex_unlock(&uld_mutex);
return err;
} }
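Taking uld_mutex before the first failable step, as the hunk above does, lets every error path funnel through a single unlock label. A compact sketch of that structure with pthread stand-ins, helpers hypothetical:

    #include <pthread.h>

    static pthread_mutex_t uld_lock_sk = PTHREAD_MUTEX_INITIALIZER;
    static int setup_queues_sk(void) { return 0; } /* stub */
    static int setup_irqs_sk(void)   { return 0; } /* stub */

    int bring_up_sketch(void)
    {
            int err;

            pthread_mutex_lock(&uld_lock_sk);
            err = setup_queues_sk();
            if (err)
                    goto rel_lock;
            err = setup_irqs_sk();
            /* ... enable rx and start traffic on success ... */
    rel_lock:
            pthread_mutex_unlock(&uld_lock_sk);
            return err;
    }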
static void cxgb_down(struct adapter *adapter) static void cxgb_down(struct adapter *adapter)

View File

@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
/* device used for DMA mapping */ /* device used for DMA mapping */
arch_setup_dma_ops(dev, 0, 0, NULL, false); set_dma_ops(dev, get_dma_ops(&pdev->dev));
err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
if (err) { if (err) {
dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");

View File

@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem; goto no_mem;
} }
set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
ret = platform_device_add_data(pdev, &data, sizeof(data)); ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret) if (ret)
goto err; goto err;

View File

@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
/* Force 1000M Link, Default is 0x0200 */ /* Force 1000M Link, Default is 0x0200 */
phy_write(phy_dev, 7, 0x20C); phy_write(phy_dev, 7, 0x20C);
phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
/* Enable PHY loop-back */ /* Power up fiber */
phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val &= ~PHY_POWER_DOWN;
phy_write(phy_dev, COPPER_CONTROL_REG, val);
/* Enable PHY loopback */
phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
val = phy_read(phy_dev, COPPER_CONTROL_REG); val = phy_read(phy_dev, COPPER_CONTROL_REG);
val |= PHY_LOOP_BACK; val |= PHY_LOOP_BACK;
val &= ~PHY_POWER_DOWN; val &= ~PHY_POWER_DOWN;
@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
phy_write(phy_dev, 1, 0x400); phy_write(phy_dev, 1, 0x400);
phy_write(phy_dev, 7, 0x200); phy_write(phy_dev, 7, 0x200);
phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val |= PHY_POWER_DOWN;
phy_write(phy_dev, COPPER_CONTROL_REG, val);
phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
phy_write(phy_dev, 9, 0xF00); phy_write(phy_dev, 9, 0xF00);
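The loopback fix above is a paged-PHY read-modify-write: select the fiber page, flip the power bit, and always return to page 0 so later MDIO accesses hit the expected registers. A rough sketch, register numbers hypothetical:

    #define PAGE_REG_SK 22
    #define CTRL_REG_SK 0
    #define PWR_DOWN_SK (1 << 11)

    static int  phy_rd_sk(int reg)          { (void)reg; return 0; } /* stub */
    static void phy_wr_sk(int reg, int val) { (void)reg; (void)val; } /* stub */

    void fiber_power_sketch(int up)
    {
            int val;

            phy_wr_sk(PAGE_REG_SK, 1);      /* fiber page */
            val = phy_rd_sk(CTRL_REG_SK);
            val = up ? (val & ~PWR_DOWN_SK) : (val | PWR_DOWN_SK);
            phy_wr_sk(CTRL_REG_SK, val);
            phy_wr_sk(PAGE_REG_SK, 0);      /* back to the default page */
    }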

View File

@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE; SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | info->tx_types = BIT(HWTSTAMP_TX_OFF) |
(BIT(1) << HWTSTAMP_TX_ON); BIT(HWTSTAMP_TX_ON);
info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
(BIT(1) << HWTSTAMP_FILTER_ALL); BIT(HWTSTAMP_FILTER_ALL);
return 0; return 0;
} }
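The old expression shifted the constant BIT(1), value 0x2, by the enum, so HWTSTAMP_TX_OFF (0) produced mask 0x2 instead of 0x1 and every advertised bit was off by one. BIT(n) builds the single-bit mask directly; a standalone check:

    #include <assert.h>

    #define BIT(n) (1UL << (n))

    int main(void)
    {
            enum { TX_OFF = 0, TX_ON = 1 };

            assert((BIT(1) << TX_OFF) == 0x2UL);     /* wrong mask for bit 0 */
            assert(BIT(TX_OFF) == 0x1UL);            /* intended mask */
            assert((BIT(TX_OFF) | BIT(TX_ON)) == 0x3UL);
            return 0;
    }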

View File

@ -4241,6 +4241,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return netdev; return netdev;
err_cleanup_nic: err_cleanup_nic:
if (profile->cleanup)
profile->cleanup(priv); profile->cleanup(priv);
free_netdev(netdev); free_netdev(netdev);

View File

@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
params->num_tc = 1; params->num_tc = 1;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
} }
static void mlx5e_build_rep_netdev(struct net_device *netdev) static void mlx5e_build_rep_netdev(struct net_device *netdev)

View File

@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = {
{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])},
{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)},
{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
{MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
{MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)},
{MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},

View File

@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0; return 0;
} }
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{ {
struct mlx5_core_dev *dev; struct mlx5_core_dev *dev = devlink_priv(devlink);
u16 cur_mlx5_mode, mlx5_mode = 0;
dev = devlink_priv(devlink); if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return -EOPNOTSUPP;
if (!MLX5_CAP_GEN(dev, vport_group_manager)) if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP; return -EOPNOTSUPP;
cur_mlx5_mode = dev->priv.eswitch->mode; if (dev->priv.eswitch->mode == SRIOV_NONE)
if (cur_mlx5_mode == SRIOV_NONE)
return -EOPNOTSUPP; return -EOPNOTSUPP;
return 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u16 cur_mlx5_mode, mlx5_mode = 0;
int err;
err = mlx5_devlink_eswitch_check(devlink);
if (err)
return err;
cur_mlx5_mode = dev->priv.eswitch->mode;
if (esw_mode_from_devlink(mode, &mlx5_mode)) if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL; return -EINVAL;
@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{ {
struct mlx5_core_dev *dev; struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
dev = devlink_priv(devlink); err = mlx5_devlink_eswitch_check(devlink);
if (err)
if (!MLX5_CAP_GEN(dev, vport_group_manager)) return err;
return -EOPNOTSUPP;
if (dev->priv.eswitch->mode == SRIOV_NONE)
return -EOPNOTSUPP;
return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
} }
@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{ {
struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_eswitch *esw = dev->priv.eswitch;
int num_vports = esw->enabled_vports;
int err, vport; int err, vport;
u8 mlx5_mode; u8 mlx5_mode;
if (!MLX5_CAP_GEN(dev, vport_group_manager)) err = mlx5_devlink_eswitch_check(devlink);
return -EOPNOTSUPP; if (err)
return err;
if (esw->mode == SRIOV_NONE)
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
if (err) if (err)
goto out; goto out;
for (vport = 1; vport < num_vports; vport++) { for (vport = 1; vport < esw->enabled_vports; vport++) {
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
if (err) { if (err) {
esw_warn(dev, "Failed to set min inline on vport %d\n", esw_warn(dev, "Failed to set min inline on vport %d\n",
@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{ {
struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
if (!MLX5_CAP_GEN(dev, vport_group_manager)) err = mlx5_devlink_eswitch_check(devlink);
return -EOPNOTSUPP; if (err)
return err;
if (esw->mode == SRIOV_NONE)
return -EOPNOTSUPP;
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
} }
@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_eswitch *esw = dev->priv.eswitch;
int err; int err;
if (!MLX5_CAP_GEN(dev, vport_group_manager)) err = mlx5_devlink_eswitch_check(devlink);
return -EOPNOTSUPP; if (err)
return err;
if (esw->mode == SRIOV_NONE)
return -EOPNOTSUPP;
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{ {
struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
if (!MLX5_CAP_GEN(dev, vport_group_manager)) err = mlx5_devlink_eswitch_check(devlink);
return -EOPNOTSUPP; if (err)
return err;
if (esw->mode == SRIOV_NONE)
return -EOPNOTSUPP;
*encap = esw->offloads.encap; *encap = esw->offloads.encap;
return 0; return 0;

View File

@ -177,6 +177,7 @@ static struct mlx5_profile profile[] = {
#define FW_INIT_TIMEOUT_MILI 2000 #define FW_INIT_TIMEOUT_MILI 2000
#define FW_INIT_WAIT_MS 2 #define FW_INIT_WAIT_MS 2
#define FW_PRE_INIT_TIMEOUT_MILI 10000
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{ {
@ -1013,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
*/ */
dev->state = MLX5_DEVICE_STATE_UP; dev->state = MLX5_DEVICE_STATE_UP;
/* wait for firmware to accept initialization segment configurations
*/
err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
if (err) {
dev_err(&dev->pdev->dev, "Firmware over %d ms in pre-initializing state, aborting\n",
FW_PRE_INIT_TIMEOUT_MILI);
goto out;
}
err = mlx5_cmd_init(dev); err = mlx5_cmd_init(dev);
if (err) { if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
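The pre-init wait above gates the first command on firmware readiness with a bounded budget, so a wedged device fails the probe instead of timing out command by command. A stubbed sketch of that poll loop, helpers hypothetical:

    #include <errno.h>
    #include <stdbool.h>

    static bool fw_ready_sk(void) { return true; } /* stub: read init segment */

    int wait_fw_init_sketch(int timeout_ms, int step_ms)
    {
            int budget = timeout_ms;

            while (!fw_ready_sk()) {
                    if ((budget -= step_ms) <= 0)
                            return -ETIMEDOUT;  /* abort the whole load */
                    /* msleep(step_ms) in the real driver */
            }
            return 0;
    }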

View File

@ -661,8 +661,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
up_write(&vf->efx->filter_sem); up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock); mutex_unlock(&vf->efx->mac_lock);
up_write(&vf->efx->filter_sem);
rc2 = efx_net_open(vf->efx->net_dev); rc2 = efx_net_open(vf->efx->net_dev);
if (rc2) if (rc2)
goto reset_nic; goto reset_nic;

View File

@ -2831,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[first_entry].buf = des; tx_q->tx_skbuff_dma[first_entry].buf = des;
tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
tx_q->tx_skbuff[first_entry] = skb;
first->des0 = cpu_to_le32(des); first->des0 = cpu_to_le32(des);
@ -2865,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[tx_q->cur_tx] = skb;
/* We've used all descriptors we need for this skb; now
* advance cur_tx so that it references a fresh descriptor.
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@ -2998,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc; first = desc;
tx_q->tx_skbuff[first_entry] = skb;
enh_desc = priv->plat->enh_desc; enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */ /* To program the descriptors according to the size of the frame */
if (enh_desc) if (enh_desc)
@ -3047,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len); skb->len);
} }
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); /* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[entry] = skb;
/* We've used all descriptors we need for this skb; now
* advance cur_tx so that it references a fresh descriptor.
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
tx_q->cur_tx = entry; tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) { if (netif_msg_pktdata(priv)) {
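The ownership rule spelled out in the comments above can be shown in isolation: attach the skb only to its final descriptor, then advance cur_tx past it, so a concurrent cleaner walking up to cur_tx frees it exactly once. Simplified ring, names hypothetical:

    #define RING_SIZE_SK 256
    #define NEXT_SK(i) (((i) + 1) % RING_SIZE_SK)

    struct txq_sk { void *skbuff[RING_SIZE_SK]; unsigned int cur_tx; };

    void finish_xmit_sketch(struct txq_sk *q, unsigned int last, void *skb)
    {
            q->skbuff[last] = skb;        /* only the last descriptor owns it */
            q->cur_tx = NEXT_SK(last);    /* leave cur_tx on a fresh slot */
    }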

View File

@ -442,7 +442,7 @@ struct brcmf_fw {
const char *nvram_name; const char *nvram_name;
u16 domain_nr; u16 domain_nr;
u16 bus_nr; u16 bus_nr;
void (*done)(struct device *dev, const struct firmware *fw, void (*done)(struct device *dev, int err, const struct firmware *fw,
void *nvram_image, u32 nvram_len); void *nvram_image, u32 nvram_len);
}; };
@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
goto fail; goto fail;
fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
kfree(fwctx); kfree(fwctx);
return; return;
fail: fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
release_firmware(fwctx->code); release_firmware(fwctx->code);
device_release_driver(fwctx->dev); fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
kfree(fwctx); kfree(fwctx);
} }
static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
{ {
struct brcmf_fw *fwctx = ctx; struct brcmf_fw *fwctx = ctx;
int ret; int ret = 0;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
if (!fw) if (!fw) {
ret = -ENOENT;
goto fail; goto fail;
/* only requested code so done here */
if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
fwctx->done(fwctx->dev, fw, NULL, 0);
kfree(fwctx);
return;
} }
/* only requested code so done here */
if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
goto done;
fwctx->code = fw; fwctx->code = fw;
ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
fwctx->dev, GFP_KERNEL, fwctx, fwctx->dev, GFP_KERNEL, fwctx,
brcmf_fw_request_nvram_done); brcmf_fw_request_nvram_done);
if (!ret) /* pass NULL to nvram callback for bcm47xx fallback */
return; if (ret)
brcmf_fw_request_nvram_done(NULL, fwctx); brcmf_fw_request_nvram_done(NULL, fwctx);
return; return;
fail: fail:
brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
device_release_driver(fwctx->dev); done:
fwctx->done(fwctx->dev, ret, fw, NULL, 0);
kfree(fwctx); kfree(fwctx);
} }
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram, const char *code, const char *nvram,
void (*fw_cb)(struct device *dev, void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw, const struct firmware *fw,
void *nvram_image, u32 nvram_len), void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr) u16 domain_nr, u16 bus_nr)
@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
int brcmf_fw_get_firmwares(struct device *dev, u16 flags, int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram, const char *code, const char *nvram,
void (*fw_cb)(struct device *dev, void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw, const struct firmware *fw,
void *nvram_image, u32 nvram_len)) void *nvram_image, u32 nvram_len))
{ {

View File

@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
*/ */
int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
const char *code, const char *nvram, const char *code, const char *nvram,
void (*fw_cb)(struct device *dev, void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw, const struct firmware *fw,
void *nvram_image, u32 nvram_len), void *nvram_image, u32 nvram_len),
u16 domain_nr, u16 bus_nr); u16 domain_nr, u16 bus_nr);
int brcmf_fw_get_firmwares(struct device *dev, u16 flags, int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram, const char *code, const char *nvram,
void (*fw_cb)(struct device *dev, void (*fw_cb)(struct device *dev, int err,
const struct firmware *fw, const struct firmware *fw,
void *nvram_image, u32 nvram_len)); void *nvram_image, u32 nvram_len));
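The signature change threads the request outcome into the completion callback, so the one consumer decides how to unwind instead of the helper calling device_release_driver() behind its back. A sketch of the contract, types hypothetical:

    #include <stddef.h>

    struct fw_blob_sk { const void *data; unsigned int len; };

    typedef void (*fw_done_fn)(void *dev, int err,
                               const struct fw_blob_sk *fw);

    void request_done_sketch(void *dev, int err,
                             const struct fw_blob_sk *fw, fw_done_fn done)
    {
            if (err)
                    done(dev, err, NULL);  /* caller unwinds its own state */
            else
                    done(dev, 0, fw);      /* hand over the image */
    }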

View File

@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
struct brcmf_fws_mac_descriptor *entry; struct brcmf_fws_mac_descriptor *entry;
if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE) if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
return; return;
entry = &fws->desc.iface[ifp->ifidx]; entry = &fws->desc.iface[ifp->ifidx];

View File

@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
.write32 = brcmf_pcie_buscore_write32, .write32 = brcmf_pcie_buscore_write32,
}; };
static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, static void brcmf_pcie_setup(struct device *dev, int ret,
const struct firmware *fw,
void *nvram, u32 nvram_len) void *nvram, u32 nvram_len)
{ {
struct brcmf_bus *bus = dev_get_drvdata(dev); struct brcmf_bus *bus;
struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; struct brcmf_pciedev *pcie_bus_dev;
struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; struct brcmf_pciedev_info *devinfo;
struct brcmf_commonring **flowrings; struct brcmf_commonring **flowrings;
int ret;
u32 i; u32 i;
/* check firmware loading result */
if (ret)
goto fail;
bus = dev_get_drvdata(dev);
pcie_bus_dev = bus->bus_priv.pcie;
devinfo = pcie_bus_dev->devinfo;
brcmf_pcie_attach(devinfo); brcmf_pcie_attach(devinfo);
/* Some of the firmwares have the size of the memory of the device /* Some of the firmwares have the size of the memory of the device

View File

@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.get_memdump = brcmf_sdio_bus_get_memdump, .get_memdump = brcmf_sdio_bus_get_memdump,
}; };
static void brcmf_sdio_firmware_callback(struct device *dev, static void brcmf_sdio_firmware_callback(struct device *dev, int err,
const struct firmware *code, const struct firmware *code,
void *nvram, u32 nvram_len) void *nvram, u32 nvram_len)
{ {
struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct brcmf_sdio_dev *sdiodev;
struct brcmf_sdio *bus = sdiodev->bus; struct brcmf_sdio *bus;
int err = 0;
u8 saveclk; u8 saveclk;
brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
if (err)
goto fail;
if (!bus_if->drvr) if (!bus_if->drvr)
return; return;
bus = sdiodev->bus;
/* try to download image and nvram to the dongle */ /* try to download image and nvram to the dongle */
bus->alp_only = true; bus->alp_only = true;
err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@ -4083,6 +4088,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
fail: fail:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
device_release_driver(dev); device_release_driver(dev);
device_release_driver(&sdiodev->func[2]->dev);
} }
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)

View File

@ -1159,17 +1159,18 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
return ret; return ret;
} }
static void brcmf_usb_probe_phase2(struct device *dev, static void brcmf_usb_probe_phase2(struct device *dev, int ret,
const struct firmware *fw, const struct firmware *fw,
void *nvram, u32 nvlen) void *nvram, u32 nvlen)
{ {
struct brcmf_bus *bus = dev_get_drvdata(dev); struct brcmf_bus *bus = dev_get_drvdata(dev);
struct brcmf_usbdev_info *devinfo; struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
int ret;
if (ret)
goto error;
brcmf_dbg(USB, "Start fw downloading\n"); brcmf_dbg(USB, "Start fw downloading\n");
devinfo = bus->bus_priv.usb->devinfo;
ret = check_file(fw->data); ret = check_file(fw->data);
if (ret < 0) { if (ret < 0) {
brcmf_err("invalid firmware\n"); brcmf_err("invalid firmware\n");

View File

@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
.flags = IRQCHIP_SKIP_SET_WAKE, .flags = IRQCHIP_SKIP_SET_WAKE,
}; };
static void amd_gpio_irq_handler(struct irq_desc *desc) #define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
{ {
u32 i; struct amd_gpio *gpio_dev = dev_id;
u32 off; struct gpio_chip *gc = &gpio_dev->gc;
u32 reg; irqreturn_t ret = IRQ_NONE;
u32 pin_reg; unsigned int i, irqnr;
u64 reg64;
int handled = 0;
unsigned int irq;
unsigned long flags; unsigned long flags;
struct irq_chip *chip = irq_desc_get_chip(desc); u32 *regs, regval;
struct gpio_chip *gc = irq_desc_get_handler_data(desc); u64 status, mask;
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
chained_irq_enter(chip, desc); /* Read the wake status */
/*enable GPIO interrupt again*/
raw_spin_lock_irqsave(&gpio_dev->lock, flags); raw_spin_lock_irqsave(&gpio_dev->lock, flags);
reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
reg64 = reg; status <<= 32;
reg64 = reg64 << 32; status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
reg64 |= reg;
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
/* /* Bit 0-45 contain the relevant status bits */
* first 46 bits indicates interrupt status. status &= (1ULL << 46) - 1;
* one bit represents four interrupt sources. regs = gpio_dev->base;
*/ for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
for (off = 0; off < 46 ; off++) { if (!(status & mask))
if (reg64 & BIT(off)) { continue;
status &= ~mask;
/* Each status bit covers four pins */
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
pin_reg = readl(gpio_dev->base + regval = readl(regs + i);
(off * 4 + i) * 4); if (!(regval & PIN_IRQ_PENDING))
if ((pin_reg & BIT(INTERRUPT_STS_OFF)) || continue;
(pin_reg & BIT(WAKE_STS_OFF))) { irq = irq_find_mapping(gc->irqdomain, irqnr + i);
irq = irq_find_mapping(gc->irqdomain,
off * 4 + i);
generic_handle_irq(irq); generic_handle_irq(irq);
writel(pin_reg, /* Clear interrupt */
gpio_dev->base writel(regval, regs + i);
+ (off * 4 + i) * 4); ret = IRQ_HANDLED;
handled++;
}
}
} }
} }
if (handled == 0) /* Signal EOI to the GPIO unit */
handle_bad_irq(desc);
raw_spin_lock_irqsave(&gpio_dev->lock, flags); raw_spin_lock_irqsave(&gpio_dev->lock, flags);
reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
reg |= EOI_MASK; regval |= EOI_MASK;
writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG); writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
chained_irq_exit(chip, desc); return ret;
} }
static int amd_get_groups_count(struct pinctrl_dev *pctldev) static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2; goto out2;
} }
gpiochip_set_chained_irqchip(&gpio_dev->gc, ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
&amd_gpio_irqchip, KBUILD_MODNAME, gpio_dev);
irq_base, if (ret)
amd_gpio_irq_handler); goto out2;
platform_set_drvdata(pdev, gpio_dev); platform_set_drvdata(pdev, gpio_dev);
dev_dbg(&pdev->dev, "amd gpio driver loaded\n"); dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
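For reference, a userspace model of the scan logic in the new handler: bit n of the 46-bit wake status covers pins 4n..4n+3, and each pin has one 32-bit register whose pending bits are tested against PIN_IRQ_PENDING. The bit positions below are illustrative copies of the driver's constants.

/* scan.c - userspace model of the new handler's status walk. */
#include <stdint.h>
#include <stdio.h>

#define INTERRUPT_STS_OFF 28
#define WAKE_STS_OFF      15
#define PIN_IRQ_PENDING ((1u << INTERRUPT_STS_OFF) | (1u << WAKE_STS_OFF))

static void scan(uint64_t status, const uint32_t *pin_regs)
{
        uint64_t mask;
        unsigned int i, irqnr;

        status &= (1ULL << 46) - 1;             /* bits 0-45 are valid */
        for (mask = 1, irqnr = 0; status; mask <<= 1, irqnr += 4) {
                if (!(status & mask))
                        continue;
                status &= ~mask;
                for (i = 0; i < 4; i++)         /* one status bit = 4 pins */
                        if (pin_regs[irqnr + i] & PIN_IRQ_PENDING)
                                printf("pin %u pending\n", irqnr + i);
        }
}

int main(void)
{
        uint32_t regs[46 * 4] = { 0 };

        regs[5] = PIN_IRQ_PENDING;              /* pin 5 -> status bit 1 */
        scan(1ULL << 1, regs);                  /* prints "pin 5 pending" */
        return 0;
}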


@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
break; break;
case PIN_CONFIG_OUTPUT: case PIN_CONFIG_OUTPUT:
__stm32_gpio_set(bank, offset, arg); __stm32_gpio_set(bank, offset, arg);
ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false); ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;


@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
QEDI_ERR(&qedi->dbg_ctx, QEDI_ERR(&qedi->dbg_ctx,
"Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
WARN_ON(1);
} }
} }


@ -1499,11 +1499,9 @@ int qedi_get_task_idx(struct qedi_ctx *qedi)
void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
{ {
if (!test_and_clear_bit(idx, qedi->task_idx_map)) { if (!test_and_clear_bit(idx, qedi->task_idx_map))
QEDI_ERR(&qedi->dbg_ctx, QEDI_ERR(&qedi->dbg_ctx,
"FW task context, already cleared, tid=0x%x\n", idx); "FW task context, already cleared, tid=0x%x\n", idx);
WARN_ON(1);
}
} }
void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,


@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
int status; int status;
token = (autofs_wqt_t) param->fail.token; token = (autofs_wqt_t) param->fail.token;
status = param->fail.status ? param->fail.status : -ENOENT; status = param->fail.status < 0 ? param->fail.status : -ENOENT;
return autofs4_wait_release(sbi, token, status); return autofs4_wait_release(sbi, token, status);
} }
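The point of the "< 0" test: the token status comes from userspace, and a positive value is not a valid errno to hand back to waiters, so anything non-negative is mapped to -ENOENT. A small userspace demo of that sanitization:

#include <errno.h>
#include <stdio.h>

static int sanitize(int status)
{
        return status < 0 ? status : -ENOENT;
}

int main(void)
{
        /* -5 -2 -2: only genuine negative errnos pass through */
        printf("%d %d %d\n", sanitize(-EIO), sanitize(0), sanitize(12345));
        return 0;
}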


@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
if (!is_sync_kiocb(iocb)) if (!is_sync_kiocb(iocb))
ctx->iocb = iocb; ctx->iocb = iocb;
if (to->type & ITER_IOVEC) if (to->type == ITER_IOVEC)
ctx->should_dirty = true; ctx->should_dirty = true;
rc = setup_aio_ctx_iter(ctx, to, READ); rc = setup_aio_ctx_iter(ctx, to, READ);
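Why "==" rather than "&": ITER_IOVEC is 0 in the iov_iter type enum of this era, so a bitwise AND with it always yields 0 and the branch never ran. A demo with illustrative copies of those enum values (the real field also carries a direction bit, which is 0 for reads):

#include <stdio.h>

enum { ITER_IOVEC = 0, ITER_KVEC = 2, ITER_BVEC = 4, ITER_PIPE = 8 };

int main(void)
{
        int type = ITER_IOVEC;

        printf("and: %d\n", type & ITER_IOVEC);         /* always 0 */
        printf("eq : %d\n", type == ITER_IOVEC);        /* 1 for iovec */
        return 0;
}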


@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
if (!pages) { if (!pages) {
pages = vmalloc(max_pages * sizeof(struct page *)); pages = vmalloc(max_pages * sizeof(struct page *));
if (!bv) { if (!pages) {
kvfree(bv); kvfree(bv);
return -ENOMEM; return -ENOMEM;
} }


@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid, __u16 search_flags, struct cifs_fid *fid, __u16 search_flags,
struct cifs_search_info *srch_inf) struct cifs_search_info *srch_inf)
{ {
return CIFSFindFirst(xid, tcon, path, cifs_sb, int rc;
rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
&fid->netfid, search_flags, srch_inf, true); &fid->netfid, search_flags, srch_inf, true);
if (rc)
cifs_dbg(FYI, "find first failed=%d\n", rc);
return rc;
} }
static int static int


@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
kfree(utf16_path); kfree(utf16_path);
if (rc) { if (rc) {
cifs_dbg(VFS, "open dir failed\n"); cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
return rc; return rc;
} }
@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf); fid->volatile_fid, 0, srch_inf);
if (rc) { if (rc) {
cifs_dbg(VFS, "query directory failed\n"); cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
} }
return rc; return rc;
@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
sg = init_sg(rqst, sign); sg = init_sg(rqst, sign);
if (!sg) { if (!sg) {
cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc); cifs_dbg(VFS, "%s: Failed to init sg", __func__);
rc = -ENOMEM;
goto free_req; goto free_req;
} }
@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
iv = kzalloc(iv_len, GFP_KERNEL); iv = kzalloc(iv_len, GFP_KERNEL);
if (!iv) { if (!iv) {
cifs_dbg(VFS, "%s: Failed to alloc IV", __func__); cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
rc = -ENOMEM;
goto free_sg; goto free_sg;
} }
iv[0] = 3; iv[0] = 3;


@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
pcreatetime = (__u64 *)value; pcreatetime = (__u64 *)value;
*pcreatetime = CIFS_I(inode)->createtime; *pcreatetime = CIFS_I(inode)->createtime;
return sizeof(__u64); return sizeof(__u64);
return rc;
} }


@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
if (ret < 0) if (ret < 0)
goto out; goto out;
} }
start_index = indices[pvec.nr - 1] + 1;
} }
out: out:
put_dax(dax_dev); put_dax(dax_dev);


@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (write) { if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
unsigned long ptr_size;
struct rlimit *rlim; struct rlimit *rlim;
/*
* Since the stack will hold pointers to the strings, we
* must account for them as well.
*
* The size calculation covers the entire vma while each arg page
* is built, so each time we get here it reflects the total used
* so far (rather than just the size newly added by this call).
* As a result, we always add the entire size of the pointers, so
* that the last call to get_arg_page() checks the complete,
* correct size.
*/
ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
if (ptr_size > ULONG_MAX - size)
goto fail;
size += ptr_size;
acct_arg_size(bprm, size / PAGE_SIZE); acct_arg_size(bprm, size / PAGE_SIZE);
/* /*
@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
* to work from. * to work from.
*/ */
rlim = current->signal->rlim; rlim = current->signal->rlim;
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
put_page(page); goto fail;
return NULL;
}
} }
return page; return page;
fail:
put_page(page);
return NULL;
} }
static void put_arg_page(struct page *page) static void put_arg_page(struct page *page)
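The accounting added above can be checked with a quick userspace model: the strings copied so far (the whole vma) plus one pointer per argv/envp entry must stay under a quarter of the stack rlimit. The function name and the numbers in main() are illustrative.

#include <stdio.h>

static int arg_size_ok(unsigned long vma_size, unsigned long argc,
                       unsigned long envc, unsigned long rlim_stack)
{
        unsigned long ptr_size = (argc + envc) * sizeof(void *);

        if (ptr_size > (unsigned long)-1 - vma_size)    /* overflow guard */
                return 0;
        return vma_size + ptr_size <= rlim_stack / 4;
}

int main(void)
{
        /* 2 MiB of strings + 1,000,000 args * 8 bytes = 10 MiB needed,
         * but only 8 MiB / 4 = 2 MiB is allowed: rejected (prints 0). */
        printf("%d\n", arg_size_ok(2UL << 20, 1000000, 0, 8UL << 20));
        return 0;
}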


@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
struct ocfs2_lock_res *lockres; struct ocfs2_lock_res *lockres;
lockres = &OCFS2_I(inode)->ip_inode_lockres; lockres = &OCFS2_I(inode)->ip_inode_lockres;
/* had_lock means that the current process already took the cluster
* lock previously. If had_lock is 1, we have nothing to do here, and
* it will get unlocked where we took the lock.
*/
if (!had_lock) { if (!had_lock) {
ocfs2_remove_holder(lockres, oh); ocfs2_remove_holder(lockres, oh);
ocfs2_inode_unlock(inode, ex); ocfs2_inode_unlock(inode, ex);


@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
void *buffer, void *buffer,
size_t buffer_size) size_t buffer_size)
{ {
int ret; int ret, had_lock;
struct buffer_head *di_bh = NULL; struct buffer_head *di_bh = NULL;
struct ocfs2_lock_holder oh;
ret = ocfs2_inode_lock(inode, &di_bh, 0); had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
if (ret < 0) { if (had_lock < 0) {
mlog_errno(ret); mlog_errno(had_lock);
return ret; return had_lock;
} }
down_read(&OCFS2_I(inode)->ip_xattr_sem); down_read(&OCFS2_I(inode)->ip_xattr_sem);
ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
name, buffer, buffer_size); name, buffer, buffer_size);
up_read(&OCFS2_I(inode)->ip_xattr_sem); up_read(&OCFS2_I(inode)->ip_xattr_sem);
ocfs2_inode_unlock(inode, 0); ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
brelse(di_bh); brelse(di_bh);
@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
{ {
struct buffer_head *di_bh = NULL; struct buffer_head *di_bh = NULL;
struct ocfs2_dinode *di; struct ocfs2_dinode *di;
int ret, credits, ref_meta = 0, ref_credits = 0; int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct inode *tl_inode = osb->osb_tl_inode; struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
struct ocfs2_refcount_tree *ref_tree = NULL; struct ocfs2_refcount_tree *ref_tree = NULL;
struct ocfs2_lock_holder oh;
struct ocfs2_xattr_info xi = { struct ocfs2_xattr_info xi = {
.xi_name_index = name_index, .xi_name_index = name_index,
@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
return -ENOMEM; return -ENOMEM;
} }
ret = ocfs2_inode_lock(inode, &di_bh, 1); had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
if (ret < 0) { if (had_lock < 0) {
ret = had_lock;
mlog_errno(ret); mlog_errno(ret);
goto cleanup_nolock; goto cleanup_nolock;
} }
@ -3670,7 +3673,7 @@ int ocfs2_xattr_set(struct inode *inode,
if (ret) if (ret)
mlog_errno(ret); mlog_errno(ret);
} }
ocfs2_inode_unlock(inode, 1); ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
cleanup_nolock: cleanup_nolock:
brelse(di_bh); brelse(di_bh);
brelse(xbs.xattr_bh); brelse(xbs.xattr_bh);
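Distilled from the two call sites above, the lock-tracker pattern reads as follows. This is an outline using the ocfs2 helpers from the diff, not a standalone function: ocfs2_inode_lock_tracker() returns 1 if the current process already held the cluster lock, 0 if this call just took it, or a negative errno, and the matching unlock only drops the lock when this call actually took it.

struct ocfs2_lock_holder oh;
struct buffer_head *di_bh = NULL;
int had_lock;

had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
if (had_lock < 0)
        return had_lock;                /* lock attempt failed */
/* ... work under the cluster lock ... */
ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
brelse(di_bh);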


@ -455,24 +455,14 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
/* /*
* allocate new block and move data * allocate new block and move data
*/ */
switch (fs32_to_cpu(sb, usb1->fs_optim)) { if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
case UFS_OPTSPACE:
request = newcount; request = newcount;
if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
> uspi->s_dsize * uspi->s_minfree / (2 * 100))
break;
usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
break; } else {
default:
usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
case UFS_OPTTIME:
request = uspi->s_fpb; request = uspi->s_fpb;
if (uspi->cs_total.cs_nffree < uspi->s_dsize * if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
(uspi->s_minfree - 2) / 100) usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
break;
usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
break;
} }
result = ufs_alloc_fragments (inode, cgno, goal, request, err); result = ufs_alloc_fragments (inode, cgno, goal, request, err);
if (result) { if (result) {


@ -566,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
*/ */
inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
if (inode->i_nlink == 0) { if (inode->i_nlink == 0)
ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); return -ESTALE;
return -1;
}
/* /*
* Linux now has 32-bit uid and gid, so we can support EFT. * Linux now has 32-bit uid and gid, so we can support EFT.
@ -578,9 +576,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
inode->i_mtime.tv_nsec = 0; inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_nsec = 0; inode->i_atime.tv_nsec = 0;
inode->i_ctime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0;
@ -614,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
*/ */
inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
if (inode->i_nlink == 0) { if (inode->i_nlink == 0)
ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); return -ESTALE;
return -1;
}
/* /*
* Linux now has 32-bit uid and gid, so we can support EFT. * Linux now has 32-bit uid and gid, so we can support EFT.
@ -657,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
struct buffer_head * bh; struct buffer_head * bh;
struct inode *inode; struct inode *inode;
int err; int err = -EIO;
UFSD("ENTER, ino %lu\n", ino); UFSD("ENTER, ino %lu\n", ino);
@ -692,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
err = ufs1_read_inode(inode, err = ufs1_read_inode(inode,
ufs_inode + ufs_inotofsbo(inode->i_ino)); ufs_inode + ufs_inotofsbo(inode->i_ino));
} }
brelse(bh);
if (err) if (err)
goto bad_inode; goto bad_inode;
inode->i_version++; inode->i_version++;
ufsi->i_lastfrag = ufsi->i_lastfrag =
(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
@ -703,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
ufs_set_inode_ops(inode); ufs_set_inode_ops(inode);
brelse(bh);
UFSD("EXIT\n"); UFSD("EXIT\n");
unlock_new_inode(inode); unlock_new_inode(inode);
return inode; return inode;
bad_inode: bad_inode:
iget_failed(inode); iget_failed(inode);
return ERR_PTR(-EIO); return ERR_PTR(err);
} }
static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)


@ -1210,6 +1210,15 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
uspi->s_minfree, 100); uspi->s_minfree, 100);
if (uspi->s_minfree <= 5) {
uspi->s_time_to_space = ~0ULL;
uspi->s_space_to_time = 0;
usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
} else {
uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1;
uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize,
uspi->s_minfree - 2, 100) - 1;
}
/* /*
* Compute other frequently used values * Compute other frequently used values


@ -792,6 +792,8 @@ struct ufs_sb_private_info {
__s32 fs_magic; /* filesystem magic */ __s32 fs_magic; /* filesystem magic */
unsigned int s_dirblksize; unsigned int s_dirblksize;
__u64 s_root_blocks; __u64 s_root_blocks;
__u64 s_time_to_space;
__u64 s_space_to_time;
}; };
/* /*
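A worked example of the mount-time precomputation above, with illustrative sizes (s_dsize = 1,000,000 fragments, s_minfree = 8 percent):

#include <stdio.h>

int main(void)
{
        unsigned long long dsize = 1000000, minfree = 8;
        unsigned long long root_blocks   = dsize * minfree / 100;           /* 80000 */
        unsigned long long time_to_space = root_blocks / 2 + 1;             /* 40001 */
        unsigned long long space_to_time = dsize * (minfree - 2) / 100 - 1; /* 59999 */

        printf("%llu %llu %llu\n", root_blocks, time_to_space, space_to_time);
        return 0;
}

For s_minfree <= 5 the thresholds are pinned instead (s_time_to_space = ~0ULL, s_space_to_time = 0), so neither comparison in ufs_new_fragments() can fire and the allocator stays in UFS_OPTSPACE mode.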


@ -1316,9 +1316,12 @@ xfs_vm_bmap(
* The swap code (ab-)uses ->bmap to get a block mapping and then * The swap code (ab-)uses ->bmap to get a block mapping and then
* bypasses the file system for actual I/O. We really can't allow * bypasses the file system for actual I/O. We really can't allow
* that on reflink inodes, so we have to skip out here. And yes, * that on reflink inodes, so we have to skip out here. And yes,
* 0 is the magic code for a bmap error.. * 0 is the magic code for a bmap error.
*
* Since we don't pass back blockdev info, we can't return bmap
* information for rt files either.
*/ */
if (xfs_is_reflink_inode(ip)) if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
return 0; return 0;
filemap_write_and_wait(mapping); filemap_write_and_wait(mapping);


@ -210,7 +210,8 @@ struct acpi_device_flags {
u32 of_compatible_ok:1; u32 of_compatible_ok:1;
u32 coherent_dma:1; u32 coherent_dma:1;
u32 cca_seen:1; u32 cca_seen:1;
u32 reserved:20; u32 spi_i2c_slave:1;
u32 reserved:19;
}; };
/* File System */ /* File System */


@ -391,6 +391,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */ int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
atomic_t shared_hctx_restart;
struct blk_queue_stats *stats; struct blk_queue_stats *stats;
struct rq_wb *rq_wb; struct rq_wb *rq_wb;


@ -84,6 +84,7 @@ struct kmem_cache {
int red_left_pad; /* Left redzone padding size */ int red_left_pad; /* Left redzone padding size */
#ifdef CONFIG_SYSFS #ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */ struct kobject kobj; /* For sysfs */
struct work_struct kobj_remove_work;
#endif #endif
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
struct memcg_cache_params memcg_params; struct memcg_cache_params memcg_params;


@ -6,7 +6,7 @@
struct net; struct net;
#ifdef CONFIG_WEXT_CORE #ifdef CONFIG_WEXT_CORE
int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
void __user *arg); void __user *arg);
int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
unsigned long arg); unsigned long arg);
@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
struct iw_statistics *get_wireless_stats(struct net_device *dev); struct iw_statistics *get_wireless_stats(struct net_device *dev);
int call_commit_handler(struct net_device *dev); int call_commit_handler(struct net_device *dev);
#else #else
static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
void __user *arg) void __user *arg)
{ {
return -EINVAL; return -EINVAL;


@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
ops = container_of(fops, struct klp_ops, fops); ops = container_of(fops, struct klp_ops, fops);
rcu_read_lock(); /*
* A variant of synchronize_sched() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition().
*/
preempt_disable_notrace();
func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
stack_node); stack_node);
@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
klp_arch_set_pc(regs, (unsigned long)func->new_func); klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock: unlock:
rcu_read_unlock(); preempt_enable_notrace();
} }
/* /*


@ -48,6 +48,28 @@ static void klp_transition_work_fn(struct work_struct *work)
} }
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
/*
* This function is just a stub used to implement a hard-forced
* synchronize_sched(): scheduling it on every CPU synchronizes even
* tasks in userspace and idle.
*/
static void klp_sync(struct work_struct *work)
{
}
/*
* We also allow patching functions where RCU is not watching,
* e.g. before user_exit(). We cannot rely on the RCU infrastructure
* to do the synchronization there. Instead, hard-force the sched
* synchronization.
*
* This approach makes it safe to use RCU functions for manipulating
* func_stack.
*/
static void klp_synchronize_transition(void)
{
schedule_on_each_cpu(klp_sync);
}
/* /*
* The transition to the target patch state is complete. Clean up the data * The transition to the target patch state is complete. Clean up the data
* structures. * structures.
@ -73,7 +95,7 @@ static void klp_complete_transition(void)
* func->transition gets cleared, the handler may choose a * func->transition gets cleared, the handler may choose a
* removed function. * removed function.
*/ */
synchronize_rcu(); klp_synchronize_transition();
} }
if (klp_transition_patch->immediate) if (klp_transition_patch->immediate)
@ -92,7 +114,7 @@ static void klp_complete_transition(void)
/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
if (klp_target_state == KLP_PATCHED) if (klp_target_state == KLP_PATCHED)
synchronize_rcu(); klp_synchronize_transition();
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
for_each_process_thread(g, task) { for_each_process_thread(g, task) {
@ -136,7 +158,11 @@ void klp_cancel_transition(void)
*/ */
void klp_update_patch_state(struct task_struct *task) void klp_update_patch_state(struct task_struct *task)
{ {
rcu_read_lock(); /*
* A variant of synchronize_sched() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition().
*/
preempt_disable_notrace();
/* /*
* This test_and_clear_tsk_thread_flag() call also serves as a read * This test_and_clear_tsk_thread_flag() call also serves as a read
@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
task->patch_state = READ_ONCE(klp_target_state); task->patch_state = READ_ONCE(klp_target_state);
rcu_read_unlock(); preempt_enable_notrace();
} }
/* /*
@ -539,7 +565,7 @@ void klp_reverse_transition(void)
clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
/* Let any remaining calls to klp_update_patch_state() complete */ /* Let any remaining calls to klp_update_patch_state() complete */
synchronize_rcu(); klp_synchronize_transition();
klp_start_transition(); klp_start_transition();
} }
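The reader/writer pairing that makes this safe can be sketched as follows. This is an outline using the klp_* names from the diff, not a compilable unit: schedule_on_each_cpu() of an empty work item cannot return until every CPU has passed through the scheduler, and therefore until every preempt-disabled reader has finished.

/* Reader side (ftrace handler, possibly where RCU is not watching): */
preempt_disable_notrace();
func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                              stack_node);
/* ... redirect to func->new_func ... */
preempt_enable_notrace();

/* Writer side, before freeing a func: */
list_del_rcu(&func->stack_node);
klp_synchronize_transition();   /* all preempt-disabled readers are done */
/* now safe to free func */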


@ -23,14 +23,14 @@
* the values[M, M+1, ..., N] into the ints array in get_options. * the values[M, M+1, ..., N] into the ints array in get_options.
*/ */
static int get_range(char **str, int *pint) static int get_range(char **str, int *pint, int n)
{ {
int x, inc_counter, upper_range; int x, inc_counter, upper_range;
(*str)++; (*str)++;
upper_range = simple_strtol((*str), NULL, 0); upper_range = simple_strtol((*str), NULL, 0);
inc_counter = upper_range - *pint; inc_counter = upper_range - *pint;
for (x = *pint; x < upper_range; x++) for (x = *pint; n && x < upper_range; x++, n--)
*pint++ = x; *pint++ = x;
return inc_counter; return inc_counter;
} }
@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
break; break;
if (res == 3) { if (res == 3) {
int range_nums; int range_nums;
range_nums = get_range((char **)&str, ints + i); range_nums = get_range((char **)&str, ints + i, nints - i);
if (range_nums < 0) if (range_nums < 0)
break; break;
/* /*
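A userspace model of the bounded range expansion (simple_strtol swapped for strtol; names follow the diff): "1-100" still reports the full range via the return value, but never writes past the n slots the caller has free, which is exactly the overrun the added parameter fixes.

#include <stdio.h>
#include <stdlib.h>

static int get_range(char **str, int *pint, int n)
{
        int x, inc_counter, upper_range;

        (*str)++;                               /* skip the '-' */
        upper_range = strtol(*str, NULL, 0);
        inc_counter = upper_range - *pint;
        for (x = *pint; n && x < upper_range; x++, n--)
                *pint++ = x;
        return inc_counter;                     /* size of the full range */
}

int main(void)
{
        int ints[4] = { 1 };                    /* lower bound already parsed */
        char buf[] = "-100", *s = buf;

        get_range(&s, ints, 4);                 /* writes at most 4 ints */
        for (int i = 0; i < 4; i++)
                printf("%d\n", ints[i]);        /* 1 2 3 4 */
        return 0;
}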


@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
spin_unlock(ptl); spin_unlock(ptl);
free_page_and_swap_cache(src_page); free_page_and_swap_cache(src_page);
} }
cond_resched();
} }
} }


@ -1817,7 +1817,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
/* Check if current node has a suitable gap */ /* Check if current node has a suitable gap */
if (gap_start > high_limit) if (gap_start > high_limit)
return -ENOMEM; return -ENOMEM;
if (gap_end >= low_limit && gap_end - gap_start >= length) if (gap_end >= low_limit &&
gap_end > gap_start && gap_end - gap_start >= length)
goto found; goto found;
/* Visit right subtree if it looks promising */ /* Visit right subtree if it looks promising */
@ -1920,7 +1921,8 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
gap_end = vm_start_gap(vma); gap_end = vm_start_gap(vma);
if (gap_end < low_limit) if (gap_end < low_limit)
return -ENOMEM; return -ENOMEM;
if (gap_start <= high_limit && gap_end - gap_start >= length) if (gap_start <= high_limit &&
gap_end > gap_start && gap_end - gap_start >= length)
goto found; goto found;
/* Visit left subtree if it looks promising */ /* Visit left subtree if it looks promising */
@ -2228,16 +2230,19 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (!(vma->vm_flags & VM_GROWSUP)) if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT; return -EFAULT;
/* Guard against wrapping around to address 0. */ /* Guard against exceeding limits of the address space. */
address &= PAGE_MASK; address &= PAGE_MASK;
address += PAGE_SIZE; if (address >= TASK_SIZE)
if (!address)
return -ENOMEM; return -ENOMEM;
address += PAGE_SIZE;
/* Enforce stack_guard_gap */ /* Enforce stack_guard_gap */
gap_addr = address + stack_guard_gap; gap_addr = address + stack_guard_gap;
if (gap_addr < address)
return -ENOMEM; /* Guard against overflow */
if (gap_addr < address || gap_addr > TASK_SIZE)
gap_addr = TASK_SIZE;
next = vma->vm_next; next = vma->vm_next;
if (next && next->vm_start < gap_addr) { if (next && next->vm_start < gap_addr) {
if (!(next->vm_flags & VM_GROWSUP)) if (!(next->vm_flags & VM_GROWSUP))
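Why the extra gap_end > gap_start test matters: both operands are unsigned, so when the guard gap pushes gap_start past gap_end the subtraction wraps to a huge value and the old test spuriously matched. A userspace demo with illustrative addresses:

#include <stdio.h>

int main(void)
{
        unsigned long gap_start = 0x7f0000201000;      /* prev end + guard */
        unsigned long gap_end   = 0x7f0000200000;      /* next vma start */
        unsigned long length    = 0x1000;

        /* old check: the subtraction wraps, so the gap "fits" (prints 1) */
        printf("old: %d\n", gap_end - gap_start >= length);
        /* fixed check: rejects the inverted gap (prints 0) */
        printf("new: %d\n", gap_end > gap_start &&
                            gap_end - gap_start >= length);
        return 0;
}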


@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
return name; return name;
} }
static void sysfs_slab_remove_workfn(struct work_struct *work)
{
struct kmem_cache *s =
container_of(work, struct kmem_cache, kobj_remove_work);
if (!s->kobj.state_in_sysfs)
/*
* For a memcg cache, this may be called during
* deactivation and again on shutdown. Remove only once.
* A cache is never shut down before deactivation is
* complete, so no need to worry about synchronization.
*/
return;
#ifdef CONFIG_MEMCG
kset_unregister(s->memcg_kset);
#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
static int sysfs_slab_add(struct kmem_cache *s) static int sysfs_slab_add(struct kmem_cache *s)
{ {
int err; int err;
@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
struct kset *kset = cache_kset(s); struct kset *kset = cache_kset(s);
int unmergeable = slab_unmergeable(s); int unmergeable = slab_unmergeable(s);
INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
if (!kset) { if (!kset) {
kobject_init(&s->kobj, &slab_ktype); kobject_init(&s->kobj, &slab_ktype);
return 0; return 0;
@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
*/ */
return; return;
if (!s->kobj.state_in_sysfs) kobject_get(&s->kobj);
/* schedule_work(&s->kobj_remove_work);
* For a memcg cache, this may be called during
* deactivation and again on shutdown. Remove only once.
* A cache is never shut down before deactivation is
* complete, so no need to worry about synchronization.
*/
return;
#ifdef CONFIG_MEMCG
kset_unregister(s->memcg_kset);
#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj);
} }
void sysfs_slab_release(struct kmem_cache *s) void sysfs_slab_release(struct kmem_cache *s)
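The move to a work item follows a common kobject teardown idiom, outlined below with the names from the diff (not a standalone unit): pin the object so it outlives the caller, defer the sysfs removal, which may sleep, to process context, and drop the pin at the end of the work function.

/* Caller (may hold locks that forbid sleeping): */
kobject_get(&s->kobj);                  /* keep s alive for the work */
schedule_work(&s->kobj_remove_work);    /* runs sysfs_slab_remove_workfn */

/* End of sysfs_slab_remove_workfn(), in sleepable context: */
kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj);
kobject_put(&s->kobj);                  /* drop the reference taken above */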


@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
if (p4d_none(*p4d)) if (p4d_none(*p4d))
return NULL; return NULL;
pud = pud_offset(p4d, addr); pud = pud_offset(p4d, addr);
if (pud_none(*pud))
/*
* Don't dereference bad PUD or PMD (below) entries. This will also
* identify huge mappings, which we may encounter on architectures
* that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
* identified as vmalloc addresses by is_vmalloc_addr(), but are
* not [unambiguously] associated with a struct page, so there is
* no correct value to return for them.
*/
WARN_ON_ONCE(pud_bad(*pud));
if (pud_none(*pud) || pud_bad(*pud))
return NULL; return NULL;
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) WARN_ON_ONCE(pmd_bad(*pmd));
if (pmd_none(*pmd) || pmd_bad(*pmd))
return NULL; return NULL;
ptep = pte_offset_map(pmd, addr); ptep = pte_offset_map(pmd, addr);


@ -277,6 +277,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
return 0; return 0;
out_free_newdev: out_free_newdev:
if (new_dev->reg_state == NETREG_UNINITIALIZED)
free_netdev(new_dev); free_netdev(new_dev);
return err; return err;
} }

View File

@ -5206,8 +5206,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
if (rc == BUSY_POLL_BUDGET) if (rc == BUSY_POLL_BUDGET)
__napi_schedule(napi); __napi_schedule(napi);
local_bh_enable(); local_bh_enable();
if (local_softirq_pending())
do_softirq();
} }
void napi_busy_loop(unsigned int napi_id, void napi_busy_loop(unsigned int napi_id,
