Linux 5.15-rc5
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmFjfvceHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGxF0IAJMmDpKux9gncV8+
Lm3nB6+V5IkXNdp/Az0DOcObCf/j2tEP1DM+N4JAuOqqVh/LUjweN2Ev02TQQ++V
oObOhlcoLXcNX7RXtxzvOO8FJAkQskD2tnhUWMZ+RPO8r5Nz1swMkTfD3owIrbrn
npMg22qEcAK49V9TR7mMEfyKrL46CRv/M2K235o+13xf4D+CW2s+UCT1xWK3TaD/
mCce7g2xAbQFltKyF7cP1stwzHZVP5U1FxStCUHRVkIQuMNpgrAmW0ihIpvf5f69
B6kA04faj1bbqrNHpYeZA/6qlrCyivUdnSK/oV9j0cMvteD55TdBbWfUP7KXMWbn
l5ZzpU8=
=hoF/
-----END PGP SIGNATURE-----

Merge 'v5.15-rc5' into 'android-mainline'

Linux 5.15-rc5

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I8e4f9c30e55dd0083e1e659c6485be90ddde1b8a
This commit is contained in: commit 1f1e86960b

Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*

@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
     if (kuap_is_disabled())
         return;
 
+    if (unlikely(kuap != KUAP_NONE)) {
+        current->thread.kuap = KUAP_NONE;
+        kuap_lock(kuap, false);
+    }
+
+    if (likely(regs->kuap == KUAP_NONE))
+        return;
+
     current->thread.kuap = regs->kuap;
 
     kuap_unlock(regs->kuap, false);

@@ -23,6 +23,7 @@
 #define BRANCH_ABSOLUTE 0x2
 
 bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
 int create_branch(struct ppc_inst *instr, const u32 *addr,
           unsigned long target, int flags);
 int create_cond_branch(struct ppc_inst *instr, const u32 *addr,

@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
     local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
     local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
-    if (is_implicit_soft_masked(regs)) {
-        // Adjust regs->softe soft implicit soft-mask, so
-        // arch_irq_disabled_regs(regs) behaves as expected.
-        regs->softe = IRQS_ALL_DISABLED;
-    }
+    if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+        /*
+         * Adjust regs->softe to be soft-masked if it had not been
+         * reconcied (e.g., interrupt entry with MSR[EE]=0 but softe
+         * not yet set disabled), or if it was in an implicit soft
+         * masked state. This makes arch_irq_disabled_regs(regs)
+         * behave as expected.
+         */
+        regs->softe = IRQS_ALL_DISABLED;
+    }
     if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
         BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
 
     /* Don't do any per-CPU operations until interrupt state is fixed */

@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
 /* kernel/traps.c */
 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
 #ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
 #endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
 DECLARE_INTERRUPT_HANDLER(SMIException);
 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
 DECLARE_INTERRUPT_HANDLER(unknown_exception);

@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
     return !!(powerpc_security_features & feature);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
 
 // Features indicating support for Spectre/Meltdown mitigations
 
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
     struct iommu_table *tbl = get_iommu_table_base(dev);
     u64 mask;
 
+    if (dev_is_pci(dev)) {
+        u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+        if (dma_iommu_dma_supported(dev, bypass_mask)) {
+            dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+            return bypass_mask;
+        }
+    }
+
     if (!tbl)
         return 0;
 
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
     li      r10,MSR_RI
     mtmsrd  r10,1
     addi    r3,r1,STACK_FRAME_OVERHEAD
-    bl      machine_check_exception
+    bl      machine_check_exception_async
     b       interrupt_return_srr
 
 
@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
     subi    r12,r12,1
     sth     r12,PACA_IN_MCE(r13)
 
-    /* Invoke machine_check_exception to print MCE event and panic. */
+    /*
+     * Invoke machine_check_exception to print MCE event and panic.
+     * This is the NMI version of the handler because we are called from
+     * the early handler which is a true NMI.
+     */
     addi    r3,r1,STACK_FRAME_OVERHEAD
     bl      machine_check_exception
 
@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
      */
 
     andi.   r10,r12,MSR_PR
-    bne     2f                      /* If userspace, go normal path */
+    bne     .Lnormal_stack          /* If userspace, go normal path */
 
     andis.  r10,r12,(SRR1_PROGTM)@h
-    bne     1f                      /* If TM, emergency */
+    bne     .Lemergency_stack       /* If TM, emergency */
 
     cmpdi   r1,-INT_FRAME_SIZE      /* check if r1 is in userspace */
-    blt     2f                      /* normal path if not */
+    blt     .Lnormal_stack          /* normal path if not */
 
     /* Use the emergency stack */
-1:  andi.   r10,r12,MSR_PR          /* Set CR0 correctly for label */
+.Lemergency_stack:
+    andi.   r10,r12,MSR_PR          /* Set CR0 correctly for label */
                                     /* 3 in EXCEPTION_PROLOG_COMMON */
     mr      r10,r1                  /* Save r1 */
     ld      r1,PACAEMERGSP(r13)     /* Use emergency stack */
     subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame */
     __ISTACK(program_check)=0
     __GEN_COMMON_BODY program_check
-    b 3f
-2:
+    b .Ldo_program_check
+
+.Lnormal_stack:
     __ISTACK(program_check)=1
     __GEN_COMMON_BODY program_check
-3:
+
+.Ldo_program_check:
     addi    r3,r1,STACK_FRAME_OVERHEAD
     bl      program_check_exception
     REST_NVGPRS(r1) /* instruction emulation may change GPRs */

@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
         return;
     }
 
+    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+        WARN_ON_ONCE(in_nmi() || in_hardirq());
+
     /*
      * After the stb, interrupts are unmasked and there are no interrupts
      * pending replay. The restart sequence makes this atomic with
@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
     if (mask)
         return;
 
+    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+        WARN_ON_ONCE(in_nmi() || in_hardirq());
+
     /*
      * From this point onward, we can take interrupts, preempt,
      * etc... unless we got hard-disabled. We check if an event
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
 
 early_param("no_stf_barrier", handle_no_stf_barrier);
 
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+    return stf_enabled_flush_types;
+}
+
 /* This is the generic flag used by other architectures */
 static int __init handle_ssbd(char *p)
 {
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
         return false;
     }
 
-    show_signal_msg(signr, regs, code, addr);
+    /*
+     * Must not enable interrupts even for user-mode exception, because
+     * this can be called from machine check, which may be a NMI or IRQ
+     * which don't like interrupts being enabled. Could check for
+     * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+     * reason why _exception() should enable irqs for an exception handler,
+     * the handlers themselves do that directly.
+     */
 
-    if (arch_irqs_disabled())
-        interrupt_cond_local_irq_enable(regs);
+    show_signal_msg(signr, regs, code, addr);
 
     current->thread.trap_nr = code;
 
@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
      * do_exit() checks for in_interrupt() and panics in that case, so
      * exit the irq/nmi before calling die.
      */
-    if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
-        irq_exit();
-    else
+    if (in_nmi())
         nmi_exit();
+    else
+        irq_exit();
     die(str, regs, err);
 }
 
 /*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
  * (it uses its own early real-mode handler to handle the MCE proper
  * and then raises irq_work to call this handler when interrupts are
- * enabled).
+ * enabled). The only time when this is not true is if the early handler
+ * is unrecoverable, then it does call this directly to try to get a
+ * message out.
  */
-#ifdef CONFIG_PPC_BOOK3S_64
-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
-#else
-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
-#endif
+static void __machine_check_exception(struct pt_regs *regs)
 {
     int recover = 0;
 
@@ -841,12 +845,19 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
     /* Must die if the interrupt is not recoverable */
     if (regs_is_unrecoverable(regs))
         die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
 
 #ifdef CONFIG_PPC_BOOK3S_64
-    return;
-#else
-    return 0;
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+    __machine_check_exception(regs);
+}
 #endif
-}
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+    __machine_check_exception(regs);
+
+    return 0;
+}
 
 DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */

@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
     return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
 }
 
+bool is_offset_in_cond_branch_range(long offset)
+{
+    return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
 /*
  * Helper to check if a given instruction is a conditional branch
  * Derived from the conditional checks in analyse_instr()
@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
         offset = offset - (unsigned long)addr;
 
     /* Check we can represent the target in the instruction format */
-    if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+    if (!is_offset_in_cond_branch_range(offset))
         return 1;
 
     /* Mask out the flags and target, so they don't step on each other. */

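A note for readers checking the bounds in the two helpers above: an unconditional "b" encodes a 26-bit signed, word-aligned displacement, while a conditional "bc" only gets 16 bits. A standalone userspace sketch of the same checks (illustrative, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors is_offset_in_branch_range() / is_offset_in_cond_branch_range() */
    static bool fits_branch(long offset)
    {
        return offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3);
    }

    static bool fits_cond_branch(long offset)
    {
        return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
    }

    int main(void)
    {
        printf("%d\n", fits_cond_branch(0x7ffc)); /* 1: in range */
        printf("%d\n", fits_cond_branch(0x8000)); /* 0: too far */
        printf("%d\n", fits_branch(0x1fffffc));   /* 1: in range */
        printf("%d\n", fits_branch(0x2));         /* 0: not word-aligned */
        return 0;
    }
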
@@ -24,16 +24,30 @@
 #define EMIT(instr)    PLANT_INSTR(image, ctx->idx, instr)
 
 /* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest)  EMIT(PPC_INST_BRANCH | \
-                            (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest) \
+    do { \
+        long offset = (long)(dest) - (ctx->idx * 4); \
+        if (!is_offset_in_branch_range(offset)) { \
+            pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+            return -ERANGE; \
+        } \
+        EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
+    } while (0)
 
 /* blr; (unconditional 'branch' with link) to absolute address */
 #define PPC_BL_ABS(dest)   EMIT(PPC_INST_BL | \
                                 (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
 /* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest)  EMIT(PPC_INST_BRANCH_COND | \
-                                        (((cond) & 0x3ff) << 16) | \
-                                        (((dest) - (ctx->idx * 4)) & \
-                                         0xfffc))
+#define PPC_BCC_SHORT(cond, dest) \
+    do { \
+        long offset = (long)(dest) - (ctx->idx * 4); \
+        if (!is_offset_in_cond_branch_range(offset)) { \
+            pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+            return -ERANGE; \
+        } \
+        EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+    } while (0)
 
 /* Sign-extended 32-bit immediate load */
 #define PPC_LI32(d, i) do { \
     if ((int)(uintptr_t)(i) >= -32768 && \

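The reworked PPC_JMP/PPC_BCC_SHORT above rely on a C idiom worth calling out: a statement macro that validates its operand and executes "return -ERANGE" from the enclosing function, which is why the emit paths in this series now return int and their callers check for failure. A minimal standalone sketch of that shape (simplified bounds, illustrative only):

    #include <errno.h>
    #include <stdio.h>

    #define EMIT_JMP(offset)                                   \
        do {                                                   \
            if ((offset) < -0x2000000 || (offset) > 0x1fffffc) \
                return -ERANGE; /* returns from the caller */  \
            printf("emit b %+ld\n", (long)(offset));           \
        } while (0)

    static int build_body(long offset)
    {
        EMIT_JMP(offset);
        return 0;
    }

    int main(void)
    {
        printf("-> %d\n", build_body(8));         /* emits, then 0 */
        printf("-> %d\n", build_body(0x4000000)); /* -ERANGE, nothing emitted */
        return 0;
    }
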
@@ -78,11 +92,6 @@
 #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
 #endif
 
-static inline bool is_nearbranch(int offset)
-{
-    return (offset < 32768) && (offset >= -32768);
-}
-
 /*
  * The fly in the ointment of code size changing from pass to pass is
  * avoided by padding the short branch case with a NOP. If code size differs
@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
  * state.
  */
 #define PPC_BCC(cond, dest) do { \
-        if (is_nearbranch((dest) - (ctx->idx * 4))) { \
+        if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
             PPC_BCC_SHORT(cond, dest); \
             EMIT(PPC_RAW_NOP()); \
         } else { \

@@ -16,18 +16,18 @@
  * with our redzone usage.
  *
  *              [   prev sp         ] <-------------
- *              [   nv gpr save area ] 6*8          |
+ *              [   nv gpr save area ] 5*8          |
  *              [   tail_call_cnt   ] 8             |
- *              [   local_tmp_var   ] 8             |
+ *              [   local_tmp_var   ] 16            |
  * fp (r31) --> [   ebpf stack space ] upto 512     |
  *              [   frame header    ] 32/112        |
 * sp (r1) --->  [   stack pointer   ] --------------
  */
 
 /* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE    (6*8)
+#define BPF_PPC_STACK_SAVE    (5*8)
 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS  16
+#define BPF_PPC_STACK_LOCALS  24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME    (STACK_FRAME_MIN_SIZE + \
                                BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

@@ -210,7 +210,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
     /* Now build the prologue, body code & epilogue for real. */
     cgctx.idx = 0;
     bpf_jit_build_prologue(code_base, &cgctx);
-    bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+    if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+        bpf_jit_binary_free(bpf_hdr);
+        fp = org_fp;
+        goto out_addrs;
+    }
     bpf_jit_build_epilogue(code_base, &cgctx);
 
     if (bpf_jit_enable > 1)

@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
     }
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
     /*
      * By now, the eBPF program has already setup parameters in r3-r6
@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
     bpf_jit_emit_common_epilogue(image, ctx);
 
     EMIT(PPC_RAW_BCTR());
 
+    /* out: */
+    return 0;
 }
 
 /* Assemble the body code between the prologue & epilogue */

@@ -355,7 +357,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                 PPC_LI32(_R0, imm);
                 EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
             }
-            if (imm >= 0)
+            if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
                 EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
             else
                 EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));

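The imm == 0x80000000 special case above exists because negating the 32-bit immediate and reusing the ADD carry path breaks for INT_MIN: in 32-bit two's complement, -0x80000000 wraps back to 0x80000000. A standalone demonstration, using well-defined unsigned arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int imm = 0x80000000u;

        /* "imm = -imm" leaves this bit pattern unchanged, so a subtract
         * of 0x80000000 silently became an add of 0x80000000. */
        printf("0x%08x\n", -imm); /* prints 0x80000000 */
        return 0;
    }
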
@@ -623,7 +625,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
             EMIT(PPC_RAW_LI(dst_reg_h, 0));
             break;
         case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
-            EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg, src_reg));
+            EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
             break;
         case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
             bpf_set_seen_register(ctx, tmp_reg);

@@ -1073,7 +1075,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
             break;
         case BPF_JMP32 | BPF_JSET | BPF_K:
             /* andi does not sign-extend the immediate */
-            if (imm >= -32768 && imm < 32768) {
+            if (imm >= 0 && imm < 32768) {
                 /* PPC_ANDI is _only/always_ dot-form */
                 EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
             } else {

@@ -1090,7 +1092,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
              */
         case BPF_JMP | BPF_TAIL_CALL:
             ctx->seen |= SEEN_TAILCALL;
-            bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+            ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+            if (ret < 0)
+                return ret;
             break;
 
         default:

@@ -1103,7 +1107,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
             return -EOPNOTSUPP;
         }
         if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
-            !insn_is_zext(&insn[i + 1]))
+            !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
             EMIT(PPC_RAW_LI(dst_reg_h, 0));
     }
 
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <asm/kprobes.h>
 #include <linux/bpf.h>
+#include <asm/security_features.h>
 
 #include "bpf_jit64.h"
 
@@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *              [   prev sp          ] <-------------
  *              [   ...              ]              |
 * sp (r1) --->  [   stack pointer    ] --------------
- *              [   nv gpr save area ] 6*8
+ *              [   nv gpr save area ] 5*8
  *              [   tail_call_cnt    ] 8
- *              [   local_tmp_var    ] 8
+ *              [   local_tmp_var    ] 16
  *              [   unused red zone  ] 208 bytes protected
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)

@@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
     if (bpf_has_stack_frame(ctx))
         return STACK_FRAME_MIN_SIZE + ctx->stack_size;
     else
-        return -(BPF_PPC_STACK_SAVE + 16);
+        return -(BPF_PPC_STACK_SAVE + 24);
 }
 
 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 {
-    return bpf_jit_stack_local(ctx) + 8;
+    return bpf_jit_stack_local(ctx) + 16;
 }
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)

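With the constants above (BPF_PPC_STACK_SAVE now 5*8, the locals area grown to 24 bytes), the redzone math leaves two 8-byte scratch slots that the bpf_stf_barrier fallback further down spills to at -64(1) and -56(1). A standalone check of the arithmetic, assuming the no-stack-frame case handled by bpf_jit_stack_local():

    #include <stdio.h>

    #define BPF_PPC_STACK_SAVE (5*8)

    int main(void)
    {
        int local = -(BPF_PPC_STACK_SAVE + 24); /* bpf_jit_stack_local() */
        int tcc   = local + 16;                 /* bpf_jit_stack_tailcallcnt() */

        printf("local_tmp_var at %d(1)\n", local); /* -64: matches std 21,-64(1) */
        printf("tail_call_cnt at %d(1)\n", tcc);   /* -48 */
        return 0;
    }
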
@@ -206,7 +207,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
     EMIT(PPC_RAW_BCTRL());
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
     /*
      * By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -267,13 +268,38 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
     bpf_jit_emit_common_epilogue(image, ctx);
 
     EMIT(PPC_RAW_BCTR());
 
+    /* out: */
+    return 0;
 }
 
+/*
+ * We spill into the redzone always, even if the bpf program has its own stackframe.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+"       .global bpf_stf_barrier     ;"
+"   bpf_stf_barrier:                ;"
+"       std 21,-64(1)               ;"
+"       std 22,-56(1)               ;"
+"       sync                        ;"
+"       ld  21,-64(1)               ;"
+"       ld  22,-56(1)               ;"
+"       ori 31,31,0                 ;"
+"       .rept 14                    ;"
+"       b   1f                      ;"
+"   1:                              ;"
+"       .endr                       ;"
+"       blr                         ;"
+);
+
 /* Assemble the body code between the prologue & epilogue */
 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
                u32 *addrs, bool extra_pass)
 {
+    enum stf_barrier_type stf_barrier = stf_barrier_type_get();
     const struct bpf_insn *insn = fp->insnsi;
     int flen = fp->len;
     int i, ret;

@@ -328,18 +354,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
             EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
             goto bpf_alu32_trunc;
         case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
-        case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
         case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+            if (!imm) {
+                goto bpf_alu32_trunc;
+            } else if (imm >= -32768 && imm < 32768) {
+                EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+            } else {
+                PPC_LI32(b2p[TMP_REG_1], imm);
+                EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
+            }
+            goto bpf_alu32_trunc;
+        case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
         case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
-            if (BPF_OP(code) == BPF_SUB)
-                imm = -imm;
-            if (imm) {
-                if (imm >= -32768 && imm < 32768)
-                    EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
-                else {
-                    PPC_LI32(b2p[TMP_REG_1], imm);
-                    EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
-                }
+            if (!imm) {
+                goto bpf_alu32_trunc;
+            } else if (imm > -32768 && imm <= 32768) {
+                EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+            } else {
+                PPC_LI32(b2p[TMP_REG_1], imm);
+                EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
             }
             goto bpf_alu32_trunc;
         case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */

@@ -389,8 +422,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
         case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
             if (imm == 0)
                 return -EINVAL;
-            else if (imm == 1)
-                goto bpf_alu32_trunc;
+
+            if (imm == 1) {
+                if (BPF_OP(code) == BPF_DIV) {
+                    goto bpf_alu32_trunc;
+                } else {
+                    EMIT(PPC_RAW_LI(dst_reg, 0));
+                    break;
+                }
+            }
 
             PPC_LI32(b2p[TMP_REG_1], imm);
             switch (BPF_CLASS(code)) {

@@ -631,6 +670,29 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
          * BPF_ST NOSPEC (speculation barrier)
          */
         case BPF_ST | BPF_NOSPEC:
+            if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+                !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+                break;
+
+            switch (stf_barrier) {
+            case STF_BARRIER_EIEIO:
+                EMIT(PPC_RAW_EIEIO() | 0x02000000);
+                break;
+            case STF_BARRIER_SYNC_ORI:
+                EMIT(PPC_RAW_SYNC());
+                EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
+                EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+                break;
+            case STF_BARRIER_FALLBACK:
+                EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
+                PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+                EMIT(PPC_RAW_MTCTR(12));
+                EMIT(PPC_RAW_BCTRL());
+                EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+                break;
+            case STF_BARRIER_NONE:
+                break;
+            }
             break;
 
         /*
@@ -993,7 +1055,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
          */
         case BPF_JMP | BPF_TAIL_CALL:
             ctx->seen |= SEEN_TAILCALL;
-            bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+            ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+            if (ret < 0)
+                return ret;
             break;
 
         default:

@@ -867,6 +867,10 @@ static int __init eeh_pseries_init(void)
     if (is_kdump_kernel() || reset_devices) {
         pr_info("Issue PHB reset ...\n");
         list_for_each_entry(phb, &hose_list, list_node) {
+            // Skip if the slot is empty
+            if (list_empty(&PCI_DN(phb->dn)->child_list))
+                continue;
+
             pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
             config_addr = pseries_eeh_get_pe_config_addr(pdn);
 
@@ -507,12 +507,27 @@ static void pseries_msi_unmask(struct irq_data *d)
     irq_chip_unmask_parent(d);
 }
 
+static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+    struct msi_desc *entry = irq_data_get_msi_desc(data);
+
+    /*
+     * Do not update the MSIx vector table. It's not strictly necessary
+     * because the table is initialized by the underlying hypervisor, PowerVM
+     * or QEMU/KVM. However, if the MSIx vector entry is cleared, any further
+     * activation will fail. This can happen in some drivers (eg. IPR) which
+     * deactivate an IRQ used for testing MSI support.
+     */
+    entry->msg = *msg;
+}
+
 static struct irq_chip pseries_pci_msi_irq_chip = {
     .name          = "pSeries-PCI-MSI",
     .irq_shutdown  = pseries_msi_shutdown,
     .irq_mask      = pseries_msi_mask,
     .irq_unmask    = pseries_msi_unmask,
     .irq_eoi       = irq_chip_eoi_parent,
+    .irq_write_msi_msg = pseries_msi_write_msg,
 };
 
 static struct msi_domain_info pseries_msi_domain_info = {

@@ -82,4 +82,5 @@ static inline int syscall_get_arch(struct task_struct *task)
 #endif
 }
 
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
 #endif /* _ASM_RISCV_SYSCALL_H */

@@ -16,18 +16,24 @@
-#ifdef CONFIG_MMU
-
-#include <linux/types.h>
-#include <generated/vdso-offsets.h>
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefor don't have one.
+ */
+#ifdef CONFIG_MMU
 
-#ifndef CONFIG_GENERIC_TIME_VSYSCALL
-struct vdso_data {
-};
-#endif
+#define __VVAR_PAGES    1
+
+#ifndef __ASSEMBLY__
+#include <generated/vdso-offsets.h>
 
 #define VDSO_SYMBOL(base, name) \
     (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
 
-#endif /* CONFIG_MMU */
+#endif /* !__ASSEMBLY__ */
 
-asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_VDSO_H */

@@ -18,9 +18,10 @@
 #ifdef __LP64__
 #define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SET_GET_RLIMIT
-#define __ARCH_WANT_SYS_CLONE3
 #endif /* __LP64__ */
 
+#define __ARCH_WANT_SYS_CLONE3
+
 #include <asm-generic/unistd.h>
 
 /*
@@ -7,7 +7,6 @@
 #include <linux/linkage.h>
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
-#include <asm/vdso.h>
 #include <asm/syscall.h>
 
 #undef __SYSCALL

@@ -12,14 +12,24 @@
 #include <linux/binfmts.h>
 #include <linux/err.h>
 #include <asm/page.h>
+#include <asm/vdso.h>
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 #include <vdso/datapage.h>
 #else
-#include <asm/vdso.h>
 struct vdso_data {
 };
 #endif
 
 extern char vdso_start[], vdso_end[];
 
+enum vvar_pages {
+    VVAR_DATA_PAGE_OFFSET,
+    VVAR_NR_PAGES,
+};
+
+#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)
+
 static unsigned int vdso_pages __ro_after_init;
 static struct page **vdso_pagelist __ro_after_init;
 
@@ -38,7 +48,7 @@ static int __init vdso_init(void)
 
     vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
     vdso_pagelist =
-        kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+        kcalloc(vdso_pages + VVAR_NR_PAGES, sizeof(struct page *), GFP_KERNEL);
     if (unlikely(vdso_pagelist == NULL)) {
         pr_err("vdso: pagelist allocation failed\n");
         return -ENOMEM;
@@ -63,38 +73,41 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
     unsigned long vdso_base, vdso_len;
     int ret;
 
-    vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+    BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+
+    vdso_len = (vdso_pages + VVAR_NR_PAGES) << PAGE_SHIFT;
 
-    mmap_write_lock(mm);
+    if (mmap_write_lock_killable(mm))
+        return -EINTR;
+
     vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
     if (IS_ERR_VALUE(vdso_base)) {
         ret = vdso_base;
         goto end;
     }
 
+    mm->context.vdso = NULL;
+    ret = install_special_mapping(mm, vdso_base, VVAR_SIZE,
+        (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+    if (unlikely(ret))
+        goto end;
+
     ret =
-       install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+       install_special_mapping(mm, vdso_base + VVAR_SIZE,
+        vdso_pages << PAGE_SHIFT,
         (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
         vdso_pagelist);
 
-    if (unlikely(ret)) {
-        mm->context.vdso = NULL;
+    if (unlikely(ret))
         goto end;
-    }
-
-    vdso_base += (vdso_pages << PAGE_SHIFT);
-    ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
-        (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
-
-    if (unlikely(ret))
-        mm->context.vdso = NULL;
+
     /*
      * Put vDSO base into mm struct. We need to do this before calling
      * install_special_mapping or the perf counter mmap tracking code
      * will fail to recognise it as a vDSO (since arch_vma_name fails).
      */
-    mm->context.vdso = (void *)vdso_base;
+    mm->context.vdso = (void *)vdso_base + VVAR_SIZE;
+
 end:
     mmap_write_unlock(mm);
     return ret;

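After this rework the mapping is laid out as [vvar][vdso], with context.vdso pointing at the vdso pages themselves; that is why arch_vma_name() below now finds the data page at context.vdso - VVAR_SIZE instead of vdso + PAGE_SIZE. A standalone sketch of the address arithmetic (all values made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page = 4096, vvar_pages = 1;
        unsigned long base = 0x70000000UL; /* pretend get_unmapped_area() result */
        unsigned long vvar = base;
        unsigned long vdso = base + vvar_pages * page;

        printf("vvar at 0x%lx\n", vvar);
        printf("vdso at 0x%lx (stored in context.vdso)\n", vdso);
        printf("vdso_data = vdso - VVAR_SIZE = 0x%lx\n", vdso - vvar_pages * page);
        return 0;
    }
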
@@ -105,7 +118,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
     if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
         return "[vdso]";
     if (vma->vm_mm && (vma->vm_start ==
-               (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+               (long)vma->vm_mm->context.vdso - VVAR_SIZE))
         return "[vdso_data]";
     return NULL;
 }

@@ -3,12 +3,13 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 #include <asm/page.h>
+#include <asm/vdso.h>
 
 OUTPUT_ARCH(riscv)
 
 SECTIONS
 {
-    PROVIDE(_vdso_data = . + PAGE_SIZE);
+    PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
     . = SIZEOF_HEADERS;
 
     .hash       : { *(.hash) }          :text

@@ -16,6 +16,8 @@ static void ipi_remote_fence_i(void *info)
 
 void flush_icache_all(void)
 {
+    local_flush_icache_all();
+
     if (IS_ENABLED(CONFIG_RISCV_SBI))
         sbi_remote_fence_i(NULL);
     else

@@ -207,6 +207,8 @@ int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
 int zpci_deconfigure_device(struct zpci_dev *zdev);
+void zpci_device_reserved(struct zpci_dev *zdev);
+bool zpci_is_device_configured(struct zpci_dev *zdev);
 
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);

@@ -1826,7 +1826,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
     jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
     if (jit.addrs == NULL) {
         fp = orig_fp;
-        goto out;
+        goto free_addrs;
     }
     /*
      * Three initial passes:

@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
     spin_unlock(&zpci_list_lock);
 
     list_for_each_entry_safe(zdev, tmp, &remove, entry)
-        zpci_zdev_put(zdev);
+        zpci_device_reserved(zdev);
 }
 
 int pci_domain_nr(struct pci_bus *bus)

@@ -751,6 +751,14 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
     return ERR_PTR(rc);
 }
 
+bool zpci_is_device_configured(struct zpci_dev *zdev)
+{
+    enum zpci_state state = zdev->state;
+
+    return state != ZPCI_FN_STATE_RESERVED &&
+        state != ZPCI_FN_STATE_STANDBY;
+}
+
 /**
  * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
  * @zdev: The zpci_dev to be configured
@@ -822,6 +830,31 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
     return 0;
 }
 
+/**
+ * zpci_device_reserved() - Mark device as resverved
+ * @zdev: the zpci_dev that was reserved
+ *
+ * Handle the case that a given zPCI function was reserved by another system.
+ * After a call to this function the zpci_dev can not be found via
+ * get_zdev_by_fid() anymore but may still be accessible via existing
+ * references though it will not be functional anymore.
+ */
+void zpci_device_reserved(struct zpci_dev *zdev)
+{
+    if (zdev->has_hp_slot)
+        zpci_exit_slot(zdev);
+    /*
+     * Remove device from zpci_list as it is going away. This also
+     * makes sure we ignore subsequent zPCI events for this device.
+     */
+    spin_lock(&zpci_list_lock);
+    list_del(&zdev->entry);
+    spin_unlock(&zpci_list_lock);
+    zdev->state = ZPCI_FN_STATE_RESERVED;
+    zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+    zpci_zdev_put(zdev);
+}
+
 void zpci_release_device(struct kref *kref)
 {
     struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);

@@ -843,6 +876,12 @@ void zpci_release_device(struct kref *kref)
     case ZPCI_FN_STATE_STANDBY:
         if (zdev->has_hp_slot)
             zpci_exit_slot(zdev);
+        spin_lock(&zpci_list_lock);
+        list_del(&zdev->entry);
+        spin_unlock(&zpci_list_lock);
+        zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+        fallthrough;
+    case ZPCI_FN_STATE_RESERVED:
         if (zdev->has_resources)
             zpci_cleanup_bus_resources(zdev);
         zpci_bus_device_unregister(zdev);

@@ -851,10 +890,6 @@ void zpci_release_device(struct kref *kref)
     default:
         break;
     }
-
-    spin_lock(&zpci_list_lock);
-    list_del(&zdev->entry);
-    spin_unlock(&zpci_list_lock);
     zpci_dbg(3, "rem fid:%x\n", zdev->fid);
     kfree(zdev);
 }

@@ -140,7 +140,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
         /* The 0x0304 event may immediately reserve the device */
         if (!clp_get_state(zdev->fid, &state) &&
             state == ZPCI_FN_STATE_RESERVED) {
-            zpci_zdev_put(zdev);
+            zpci_device_reserved(zdev);
         }
     }
     break;

@@ -151,7 +151,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
     case 0x0308: /* Standby -> Reserved */
         if (!zdev)
             break;
-        zpci_zdev_put(zdev);
+        zpci_device_reserved(zdev);
         break;
     default:
         break;

@@ -1405,7 +1405,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
     bool "64GB"
-    depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+    depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
     select X86_PAE
     help
       Select this if you have a 32-bit processor and more than 4

@@ -25,7 +25,7 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
          * For !SMAP hardware we patch out CLAC on entry.
          */
         if (boot_cpu_has(X86_FEATURE_SMAP) ||
-            (IS_ENABLED(CONFIG_64_BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
+            (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
             mask |= X86_EFLAGS_AC;
 
         WARN_ON_ONCE(flags & mask);

@@ -326,6 +326,7 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_SMAP
         cr4_set_bits(X86_CR4_SMAP);
 #else
+        clear_cpu_cap(c, X86_FEATURE_SMAP);
         cr4_clear_bits(X86_CR4_SMAP);
 #endif
     }

@@ -527,12 +527,14 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
     rdt_domain_reconfigure_cdp(r);
 
     if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
-        kfree(d);
+        kfree(hw_dom);
         return;
     }
 
     if (r->mon_capable && domain_setup_mon_state(r, d)) {
-        kfree(d);
+        kfree(hw_dom->ctrl_val);
+        kfree(hw_dom->mbps_val);
+        kfree(hw_dom);
         return;
     }
 
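The shape of this fix: on a partial-construction failure, free exactly what was built so far. Here d is embedded in hw_dom, so the container is what must be freed, and the ctrl/mbps arrays must go before it on the later failure. A minimal standalone analogue (hypothetical struct names, not the resctrl types):

    #include <stdlib.h>

    struct hw_domain {
        struct { int id; } d; /* embedded: freed via the container */
        int *ctrl_val;
    };

    static struct hw_domain *make(void)
    {
        struct hw_domain *hw = calloc(1, sizeof(*hw));

        if (!hw)
            return NULL;
        hw->ctrl_val = calloc(4, sizeof(int));
        if (!hw->ctrl_val) {
            free(hw); /* free the container, never &hw->d */
            return NULL;
        }
        return hw;
    }

    int main(void)
    {
        struct hw_domain *hw = make();

        if (hw)
            free(hw->ctrl_val);
        free(hw);
        return 0;
    }
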
@@ -714,12 +714,6 @@ static struct chipset early_qrk[] __initdata = {
      */
     { PCI_VENDOR_ID_INTEL, 0x0f00,
         PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-    { PCI_VENDOR_ID_INTEL, 0x3e20,
-        PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-    { PCI_VENDOR_ID_INTEL, 0x3ec4,
-        PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
-    { PCI_VENDOR_ID_INTEL, 0x8a12,
-        PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
     { PCI_VENDOR_ID_BROADCOM, 0x4331,
         PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
     {}

@@ -379,9 +379,14 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
                      sizeof(fpu->state.fxsave)))
             return -EFAULT;
 
-        /* Reject invalid MXCSR values. */
-        if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
-            return -EINVAL;
+        if (IS_ENABLED(CONFIG_X86_64)) {
+            /* Reject invalid MXCSR values. */
+            if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
+                return -EINVAL;
+        } else {
+            /* Mask invalid bits out for historical reasons (broken hardware). */
+            fpu->state.fxsave.mxcsr &= ~mxcsr_feature_mask;
+        }
 
         /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
         if (use_xsave())

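The 64-bit branch above rejects any saved MXCSR with a bit set outside mxcsr_feature_mask; the 32-bit branch instead clears such bits, keeping old binaries on quirky hardware alive. A standalone sketch of the reject test, with a made-up mask (the real one is probed from FXSAVE at boot):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mxcsr_feature_mask = 0x0000ffff; /* illustrative only */
        uint32_t good = 0x00001f80;               /* power-on default MXCSR */
        uint32_t bad  = 0x00011f80;               /* reserved bit 16 set */

        printf("%s\n", (good & ~mxcsr_feature_mask) ? "reject" : "ok");
        printf("%s\n", (bad  & ~mxcsr_feature_mask) ? "reject" : "ok");
        return 0;
    }
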
@@ -10,6 +10,7 @@
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
+#include <asm/mwait.h>
 
 #undef pr_fmt
 #define pr_fmt(fmt) "hpet: " fmt

@@ -916,6 +917,83 @@ static bool __init hpet_counting(void)
     return false;
 }
 
+static bool __init mwait_pc10_supported(void)
+{
+    unsigned int eax, ebx, ecx, mwait_substates;
+
+    if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+        return false;
+
+    if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
+        return false;
+
+    if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+        return false;
+
+    cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+    return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
+           (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
+           (mwait_substates & (0xF << 28));
+}
+
+/*
+ * Check whether the system supports PC10. If so force disable HPET as that
+ * stops counting in PC10. This check is overbroad as it does not take any
+ * of the following into account:
+ *
+ *	- ACPI tables
+ *	- Enablement of intel_idle
+ *	- Command line arguments which limit intel_idle C-state support
+ *
+ * That's perfectly fine. HPET is a piece of hardware designed by committee
+ * and the only reasons why it is still in use on modern systems is the
+ * fact that it is impossible to reliably query TSC and CPU frequency via
+ * CPUID or firmware.
+ *
+ * If HPET is functional it is useful for calibrating TSC, but this can be
+ * done via PMTIMER as well which seems to be the last remaining timer on
+ * X86/INTEL platforms that has not been completely wreckaged by feature
+ * creep.
+ *
+ * In theory HPET support should be removed altogether, but there are older
+ * systems out there which depend on it because TSC and APIC timer are
+ * dysfunctional in deeper C-states.
+ *
+ * It's only 20 years now that hardware people have been asked to provide
+ * reliable and discoverable facilities which can be used for timekeeping
+ * and per CPU timer interrupts.
+ *
+ * The probability that this problem is going to be solved in the
+ * forseeable future is close to zero, so the kernel has to be cluttered
+ * with heuristics to keep up with the ever growing amount of hardware and
+ * firmware trainwrecks. Hopefully some day hardware people will understand
+ * that the approach of "This can be fixed in software" is not sustainable.
+ * Hope dies last...
+ */
+static bool __init hpet_is_pc10_damaged(void)
+{
+    unsigned long long pcfg;
+
+    /* Check whether PC10 substates are supported */
+    if (!mwait_pc10_supported())
+        return false;
+
+    /* Check whether PC10 is enabled in PKG C-state limit */
+    rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
+    if ((pcfg & 0xF) < 8)
+        return false;
+
+    if (hpet_force_user) {
+        pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
+        return false;
+    }
+
+    pr_info("HPET dysfunctional in PC10. Force disabled.\n");
+    boot_hpet_disable = true;
+    return true;
+}
+
 /**
  * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
  */
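The decode in hpet_is_pc10_damaged() above keys off the low nibble of MSR_PKG_CST_CONFIG_CONTROL, the package C-state limit: a value below 8 means the package can never enter PC10, so HPET keeps counting and is left alone. A toy version of just that test (constant made up, no real MSR read):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long pcfg = 0x8; /* pretend rdmsrl() result */

        printf("PC10 %s\n", ((pcfg & 0xF) < 8) ? "unreachable: keep HPET"
                                               : "possible: disable HPET");
        return 0;
    }
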
@@ -929,6 +1007,9 @@ int __init hpet_enable(void)
     if (!is_hpet_capable())
         return 0;
 
+    if (hpet_is_pc10_damaged())
+        return 0;
+
     hpet_set_mapping();
     if (!hpet_virt_address)
         return 0;

@@ -130,6 +130,8 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
         } else {
             ret = ES_VMM_ERROR;
         }
+    } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
+        ret = ES_VMM_ERROR;
     } else {
         ret = ES_OK;
     }

@@ -274,7 +274,7 @@ static struct olpc_ec_driver ec_xo1_driver = {
 
 static struct olpc_ec_driver ec_xo1_5_driver = {
     .ec_cmd = olpc_xo1_ec_cmd,
-#ifdef CONFIG_OLPC_XO1_5_SCI
+#ifdef CONFIG_OLPC_XO15_SCI
     /*
      * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
      * compiled in

@@ -490,7 +490,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
     bdev = I_BDEV(inode);
     mutex_init(&bdev->bd_fsfreeze_mutex);
     spin_lock_init(&bdev->bd_size_lock);
-    bdev->bd_disk = disk;
     bdev->bd_partno = partno;
     bdev->bd_inode = inode;
     bdev->bd_stats = alloc_percpu(struct disk_stats);
@@ -498,6 +497,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
         iput(inode);
         return NULL;
     }
+    bdev->bd_disk = disk;
     return bdev;
 }
 
@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
     QUEUE_FLAG_NAME(PCI_P2PDMA),
     QUEUE_FLAG_NAME(ZONE_RESETALL),
     QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
+    QUEUE_FLAG_NAME(HCTX_ACTIVE),
     QUEUE_FLAG_NAME(NOWAIT),
 };
 #undef QUEUE_FLAG_NAME

@@ -1268,6 +1268,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 
 out_destroy_part_tbl:
     xa_destroy(&disk->part_tbl);
+    disk->part0->bd_disk = NULL;
     iput(disk->part0->bd_inode);
 out_free_bdi:
     bdi_put(disk->bdi);

@@ -49,7 +49,7 @@
 #define MLXCPLD_LPCI2C_NACK_IND        2
 
 #define MLXCPLD_I2C_FREQ_1000KHZ_SET   0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0f
+#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0c
 #define MLXCPLD_I2C_FREQ_100KHZ_SET    0x42
 
 enum mlxcpld_i2c_frequency {

@@ -495,7 +495,7 @@ mlxcpld_i2c_set_frequency(struct mlxcpld_i2c_priv *priv,
         return err;
 
     /* Set frequency only if it is not 100KHz, which is default. */
-    switch ((data->reg & data->mask) >> data->bit) {
+    switch ((regval & data->mask) >> data->bit) {
     case MLXCPLD_I2C_FREQ_1000KHZ:
         freq = MLXCPLD_I2C_FREQ_1000KHZ_SET;
         break;

@@ -41,6 +41,8 @@
 #define I2C_HANDSHAKE_RST          0x0020
 #define I2C_FIFO_ADDR_CLR          0x0001
 #define I2C_DELAY_LEN              0x0002
+#define I2C_ST_START_CON           0x8001
+#define I2C_FS_START_CON           0x1800
 #define I2C_TIME_CLR_VALUE         0x0000
 #define I2C_TIME_DEFAULT_VALUE     0x0003
 #define I2C_WRRD_TRANAC_VALUE      0x0002

@@ -480,6 +482,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
     u16 control_reg;
     u16 intr_stat_reg;
+    u16 ext_conf_val;
 
     mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START);
     intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);

@@ -518,8 +521,13 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
     if (i2c->dev_comp->ltiming_adjust)
         mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
 
+    if (i2c->speed_hz <= I2C_MAX_STANDARD_MODE_FREQ)
+        ext_conf_val = I2C_ST_START_CON;
+    else
+        ext_conf_val = I2C_FS_START_CON;
+
     if (i2c->dev_comp->timing_adjust) {
-        mtk_i2c_writew(i2c, i2c->ac_timing.ext, OFFSET_EXT_CONF);
+        ext_conf_val = i2c->ac_timing.ext;
         mtk_i2c_writew(i2c, i2c->ac_timing.inter_clk_div,
                        OFFSET_CLOCK_DIV);
         mtk_i2c_writew(i2c, I2C_SCL_MIS_COMP_VALUE,

@@ -544,6 +552,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
                        OFFSET_HS_STA_STO_AC_TIMING);
         }
     }
+    mtk_i2c_writew(i2c, ext_conf_val, OFFSET_EXT_CONF);
 
     /* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
     if (i2c->have_pmic)

@@ -454,6 +454,7 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
             break;
 
         i2c_acpi_register_device(adapter, adev, &info);
+        put_device(&adapter->dev);
         break;
     case ACPI_RECONFIG_DEVICE_REMOVE:
         if (!acpi_device_enumerated(adev))

@@ -62,14 +62,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
     struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
                          hotplug_slot);
 
-    switch (zdev->state) {
-    case ZPCI_FN_STATE_STANDBY:
-        *value = 0;
-        break;
-    default:
-        *value = 1;
-        break;
-    }
+    *value = zpci_is_device_configured(zdev) ? 1 : 0;
     return 0;
 }
 
@@ -1776,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
     host->scsi.disconnectable = 0;
     if (host->SCpnt->device->id == host->scsi.reconnected.target &&
         host->SCpnt->device->lun == host->scsi.reconnected.lun &&
-        scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) {
+        scsi_cmd_to_rq(host->SCpnt)->tag == host->scsi.reconnected.tag) {
 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
         DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
                 host->host->host_no, acornscsi_target(host)));

@@ -32,7 +32,7 @@ efct_scsi_io_alloc(struct efct_node *node)
     struct efct *efct;
     struct efct_xport *xport;
     struct efct_io *io;
-    unsigned long flags = 0;
+    unsigned long flags;
 
     efct = node->efct;
 
@@ -44,7 +44,6 @@ efct_scsi_io_alloc(struct efct_node *node)
     if (!io) {
         efc_log_err(efct, "IO alloc Failed\n");
         atomic_add_return(1, &xport->io_alloc_failed_count);
-        spin_unlock_irqrestore(&node->active_ios_lock, flags);
         return NULL;
     }
 
@@ -2281,11 +2281,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
         return FAILED;
     }
 
-    conn = session->leadconn;
-    iscsi_get_conn(conn->cls_conn);
-    conn->eh_abort_cnt++;
-    age = session->age;
-
     spin_lock(&session->back_lock);
     task = (struct iscsi_task *)sc->SCp.ptr;
     if (!task || !task->sc) {

@@ -2293,8 +2288,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
         ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
 
         spin_unlock(&session->back_lock);
-        goto success;
+        spin_unlock_bh(&session->frwd_lock);
+        mutex_unlock(&session->eh_mutex);
+        return SUCCESS;
     }
 
+    conn = session->leadconn;
+    iscsi_get_conn(conn->cls_conn);
+    conn->eh_abort_cnt++;
+    age = session->age;
+
     ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
     __iscsi_get_task(task);
     spin_unlock(&session->back_lock);

@@ -12292,12 +12292,12 @@ void
 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
              struct lpfc_iocbq *rspiocb)
 {
-    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+    struct lpfc_nodelist *ndlp = NULL;
     IOCB_t *irsp = &rspiocb->iocb;
 
     /* ELS cmd tag <ulpIoTag> completes */
     lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-            "0139 Ignoring ELS cmd tag x%x completion Data: "
+            "0139 Ignoring ELS cmd code x%x completion Data: "
             "x%x x%x x%x\n",
             irsp->ulpIoTag, irsp->ulpStatus,
             irsp->un.ulpWord[4], irsp->ulpTimeout);

@@ -12305,10 +12305,13 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
      * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
      * if exchange is busy.
      */
-    if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+    if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+        ndlp = cmdiocb->context_un.ndlp;
         lpfc_ct_free_iocb(phba, cmdiocb);
-    else
+    } else {
+        ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
         lpfc_els_free_iocb(phba, cmdiocb);
+    }
 
     lpfc_nlp_put(ndlp);
 }

@@ -6409,27 +6409,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
     return retval;
 }
 
-struct ctm_info {
-    struct ufs_hba  *hba;
-    unsigned long   pending;
-    unsigned int    ncpl;
-};
-
-static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
-{
-    struct ctm_info *const ci = priv;
-    struct completion *c;
-
-    WARN_ON_ONCE(reserved);
-    if (test_bit(req->tag, &ci->pending))
-        return true;
-    ci->ncpl++;
-    c = req->end_io_data;
-    if (c)
-        complete(c);
-    return true;
-}
-
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@ -6440,18 +6419,24 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-    unsigned long flags;
-    struct request_queue *q = hba->tmf_queue;
-    struct ctm_info ci = {
-        .hba = hba,
-    };
+    unsigned long flags, pending, issued;
+    irqreturn_t ret = IRQ_NONE;
+    int tag;
+
+    pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 
     spin_lock_irqsave(hba->host->host_lock, flags);
-    ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-    blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+    issued = hba->outstanding_tasks & ~pending;
+    for_each_set_bit(tag, &issued, hba->nutmrs) {
+        struct request *req = hba->tmf_rqs[tag];
+        struct completion *c = req->end_io_data;
+
+        complete(c);
+        ret = IRQ_HANDLED;
+    }
     spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-    return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
+    return ret;
 }
 
 /**

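The bookkeeping in the new handler is a plain bitmask walk: a tag that is in outstanding_tasks but no longer set in the doorbell snapshot has completed. The same logic as a standalone sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned long outstanding = 0xB; /* tags 0, 1, 3 were issued */
        unsigned long pending     = 0x2; /* tag 1 still running */
        unsigned long issued      = outstanding & ~pending;

        for (int tag = 0; tag < 4; tag++)
            if (issued & (1UL << tag))
                printf("complete tag %d\n", tag); /* 0 and 3 */
        return 0;
    }
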
@@ -6574,9 +6559,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
     ufshcd_hold(hba, false);
 
     spin_lock_irqsave(host->host_lock, flags);
-    blk_mq_start_request(req);
 
     task_tag = req->tag;
+    hba->tmf_rqs[req->tag] = req;
     treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
 
     memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));

@@ -6617,6 +6602,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
     }
 
     spin_lock_irqsave(hba->host->host_lock, flags);
+    hba->tmf_rqs[req->tag] = NULL;
     __clear_bit(task_tag, &hba->outstanding_tasks);
     spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -9668,6 +9654,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
         err = PTR_ERR(hba->tmf_queue);
         goto free_tmf_tag_set;
     }
+    hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
+                    sizeof(*hba->tmf_rqs), GFP_KERNEL);
+    if (!hba->tmf_rqs) {
+        err = -ENOMEM;
+        goto free_tmf_queue;
+    }
 
     /* Reset the attached device */
     ufshcd_device_reset(hba);

@@ -853,6 +853,7 @@ struct ufs_hba {
 
     struct blk_mq_tag_set tmf_tag_set;
     struct request_queue *tmf_queue;
+    struct request **tmf_rqs;
 
     struct uic_command *active_uic_cmd;
     struct mutex uic_cmd_mutex;

@@ -296,10 +296,12 @@ int ksmbd_conn_handler_loop(void *p)
         pdu_size = get_rfc1002_len(hdr_buf);
         ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
 
-        /* make sure we have enough to get to SMB header end */
-        if (!ksmbd_pdu_size_has_room(pdu_size)) {
-            ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
-                    pdu_size);
+        /*
+         * Check if pdu size is valid (min : smb header size,
+         * max : 0x00FFFFFF).
+         */
+        if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+            pdu_size > MAX_STREAM_PROT_LEN) {
             continue;
         }
 
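The new bounds come straight from the protocol: a PDU must hold at least the 64-byte SMB2 header, and can never exceed 0x00FFFFFF, the most the 3-byte RFC1002 length field can express. A standalone sketch of the same filter (sample sizes made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int hdr_size = 64, max = 0x00FFFFFF;
        unsigned int pdu_sizes[] = { 10, 4096, 0x01000000 };

        for (int i = 0; i < 3; i++) {
            unsigned int s = pdu_sizes[i];
            printf("%u: %s\n", s, (s < hdr_size || s > max) ? "drop" : "handle");
        }
        return 0;
    }
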
@@ -12,7 +12,7 @@
 #include "unicode.h"
 #include "vfs_cache.h"
 
-#define KSMBD_VERSION  "3.1.9"
+#define KSMBD_VERSION  "3.4.2"
 
 extern int ksmbd_debug_types;
 
@@ -80,18 +80,21 @@ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
 };
 
 /*
- * Returns the pointer to the beginning of the data area. Length of the data
- * area and the offset to it (from the beginning of the smb are also returned.
+ * Set length of the data area and the offset to arguments.
+ * if they are invalid, return error.
  */
-static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
+static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+                  struct smb2_hdr *hdr)
 {
+    int ret = 0;
+
     *off = 0;
     *len = 0;
 
     /* error reqeusts do not have data area */
     if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
         (((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2_LE)
-        return NULL;
+        return ret;
 
     /*
      * Following commands have data areas so we have to get the location
@ -165,69 +168,60 @@ static char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
|
|||
case SMB2_IOCTL:
|
||||
*off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
|
||||
*len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
|
||||
|
||||
break;
|
||||
default:
|
||||
ksmbd_debug(SMB, "no length check for command\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalid length or offset probably means data area is invalid, but
|
||||
* we have little choice but to ignore the data area in this case.
|
||||
*/
|
||||
if (*off > 4096) {
|
||||
ksmbd_debug(SMB, "offset %d too large, data area ignored\n",
|
||||
*off);
|
||||
*len = 0;
|
||||
*off = 0;
|
||||
} else if (*off < 0) {
|
||||
ksmbd_debug(SMB,
|
||||
"negative offset %d to data invalid ignore data area\n",
|
||||
*off);
|
||||
*off = 0;
|
||||
*len = 0;
|
||||
} else if (*len < 0) {
|
||||
ksmbd_debug(SMB,
|
||||
"negative data length %d invalid, data area ignored\n",
|
||||
*len);
|
||||
*len = 0;
|
||||
} else if (*len > 128 * 1024) {
|
||||
ksmbd_debug(SMB, "data area larger than 128K: %d\n", *len);
|
||||
*len = 0;
|
||||
ksmbd_debug(SMB, "offset %d too large\n", *off);
|
||||
ret = -EINVAL;
|
||||
} else if ((u64)*off + *len > MAX_STREAM_PROT_LEN) {
|
||||
ksmbd_debug(SMB, "Request is larger than maximum stream protocol length(%u): %llu\n",
|
||||
MAX_STREAM_PROT_LEN, (u64)*off + *len);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
/* return pointer to beginning of data area, ie offset from SMB start */
|
||||
if ((*off != 0) && (*len != 0))
|
||||
return (char *)hdr + *off;
|
||||
else
|
||||
return NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
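The rewritten validation above drops the old clamp-and-ignore behavior in favor of rejecting bad offsets and lengths with -EINVAL, and it widens the offset+length sum to 64 bits so the comparison cannot be defeated by 32-bit wraparound. A self-contained illustration of why the cast matters (constants mirror the diff; the function name is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_STREAM_PROT_LEN 0x00FFFFFFu

static int data_area_ok(uint32_t off, uint32_t len)
{
        if (off > 4096)
                return -1;      /* offset too large */
        if ((uint64_t)off + len > MAX_STREAM_PROT_LEN)
                return -1;      /* would run past the PDU maximum */
        return 0;
}

int main(void)
{
        /* 32-bit addition would wrap to 4095 here and wrongly pass */
        printf("%d\n", data_area_ok(4096, UINT32_MAX)); /* -1 */
        printf("%d\n", data_area_ok(128, 512));          /* 0 */
        return 0;
}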
/*
 * Calculate the size of the SMB message based on the fixed header
 * portion, the number of word parameters and the data portion of the message.
 */
static unsigned int smb2_calc_size(void *buf)
static int smb2_calc_size(void *buf, unsigned int *len)
{
        struct smb2_pdu *pdu = (struct smb2_pdu *)buf;
        struct smb2_hdr *hdr = &pdu->hdr;
        int offset; /* the offset from the beginning of SMB to data area */
        int data_length; /* the length of the variable length data area */
        unsigned int offset; /* the offset from the beginning of SMB to data area */
        unsigned int data_length; /* the length of the variable length data area */
        int ret;

        /* Structure Size has already been checked to make sure it is 64 */
        int len = le16_to_cpu(hdr->StructureSize);
        *len = le16_to_cpu(hdr->StructureSize);

        /*
         * StructureSize2, ie length of fixed parameter area has already
         * been checked to make sure it is the correct length.
         */
        len += le16_to_cpu(pdu->StructureSize2);
        *len += le16_to_cpu(pdu->StructureSize2);
        /*
         * StructureSize2 of smb2_lock pdu is set to 48, indicating
         * the size of smb2 lock request with single smb2_lock_element
         * regardless of number of locks. Subtract single
         * smb2_lock_element for correct buffer size check.
         */
        if (hdr->Command == SMB2_LOCK)
                *len -= sizeof(struct smb2_lock_element);

        if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
                goto calc_size_exit;

        smb2_get_data_area_len(&offset, &data_length, hdr);
        ksmbd_debug(SMB, "SMB2 data length %d offset %d\n", data_length,
        ret = smb2_get_data_area_len(&offset, &data_length, hdr);
        if (ret)
                return ret;
        ksmbd_debug(SMB, "SMB2 data length %u offset %u\n", data_length,
                    offset);

        if (data_length > 0) {

@@ -237,16 +231,19 @@ static unsigned int smb2_calc_size(void *buf)
                 * for some commands, typically those with odd StructureSize,
                 * so we must add one to the calculation.
                 */
                if (offset + 1 < len)
                if (offset + 1 < *len) {
                        ksmbd_debug(SMB,
                                    "data area offset %d overlaps SMB2 header %d\n",
                                    offset + 1, len);
                else
                        len = offset + data_length;
                                    "data area offset %d overlaps SMB2 header %u\n",
                                    offset + 1, *len);
                        return -EINVAL;
                }

                *len = offset + data_length;
        }

calc_size_exit:
        ksmbd_debug(SMB, "SMB2 len %d\n", len);
        return len;
        ksmbd_debug(SMB, "SMB2 len %u\n", *len);
        return 0;
}

static inline int smb2_query_info_req_len(struct smb2_query_info_req *h)

@@ -391,9 +388,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                return 1;
        }

        clc_len = smb2_calc_size(hdr);
        if (smb2_calc_size(hdr, &clc_len))
                return 1;

        if (len != clc_len) {
                /* server can return one byte more due to implied bcc[0] */
                /* client can return one byte more due to implied bcc[0] */
                if (clc_len == len + 1)
                        return 0;
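smb2_calc_size() now reports failure through its return value and hands the computed length back via an out parameter, and ksmbd_smb2_check_message() compares that against the bytes actually received. A minimal sketch of the comparison, including the one-byte bcc[0] allowance from the comment above; purely illustrative:

#include <stdio.h>

static int check_len(unsigned int len, unsigned int clc_len)
{
        if (len == clc_len)
                return 0;
        /* client can send one byte more due to the implied bcc[0] */
        if (clc_len == len + 1)
                return 0;
        return 1;               /* mismatch: reject */
}

int main(void)
{
        printf("%d %d %d\n", check_len(100, 100), check_len(100, 101),
               check_len(100, 120)); /* 0 0 1 */
        return 0;
}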
@@ -418,9 +417,6 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
                return 0;
        }

        if (command == SMB2_LOCK_HE && len == 88)
                return 0;

        ksmbd_debug(SMB,
                    "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
                    len, clc_len, command,

@@ -187,11 +187,6 @@ static struct smb_version_cmds smb2_0_server_cmds[NUMBER_OF_SMB2_COMMANDS] = {
        [SMB2_CHANGE_NOTIFY_HE] = { .proc = smb2_notify},
};

int init_smb2_0_server(struct ksmbd_conn *conn)
{
        return -EOPNOTSUPP;
}

/**
 * init_smb2_1_server() - initialize a smb server connection with smb2.1
 * command dispatcher

@@ -236,9 +236,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)

        if (conn->need_neg == false)
                return -EINVAL;
        if (!(conn->dialect >= SMB20_PROT_ID &&
              conn->dialect <= SMB311_PROT_ID))
                return -EINVAL;

        rsp_hdr = work->response_buf;
@@ -1166,13 +1163,6 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        case SMB21_PROT_ID:
                init_smb2_1_server(conn);
                break;
        case SMB20_PROT_ID:
                rc = init_smb2_0_server(conn);
                if (rc) {
                        rsp->hdr.Status = STATUS_NOT_SUPPORTED;
                        goto err_out;
                }
                break;
        case SMB2X_PROT_ID:
        case BAD_PROT_ID:
        default:

@@ -1191,11 +1181,9 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
        rsp->MaxReadSize = cpu_to_le32(conn->vals->max_read_size);
        rsp->MaxWriteSize = cpu_to_le32(conn->vals->max_write_size);

        if (conn->dialect > SMB20_PROT_ID) {
                memcpy(conn->ClientGUID, req->ClientGUID,
                       SMB2_CLIENT_GUID_SIZE);
                conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);
        }
        memcpy(conn->ClientGUID, req->ClientGUID,
               SMB2_CLIENT_GUID_SIZE);
        conn->cli_sec_mode = le16_to_cpu(req->SecurityMode);

        rsp->StructureSize = cpu_to_le16(65);
        rsp->DialectRevision = cpu_to_le16(conn->dialect);
@@ -1537,11 +1525,9 @@ static int ntlm_authenticate(struct ksmbd_work *work)
                }
        }

        if (conn->dialect > SMB20_PROT_ID) {
                if (!ksmbd_conn_lookup_dialect(conn)) {
                        pr_err("fail to verify the dialect\n");
                        return -ENOENT;
                }
        if (!ksmbd_conn_lookup_dialect(conn)) {
                pr_err("fail to verify the dialect\n");
                return -ENOENT;
        }
        return 0;
}

@@ -1623,11 +1609,9 @@ static int krb5_authenticate(struct ksmbd_work *work)
                }
        }

        if (conn->dialect > SMB20_PROT_ID) {
                if (!ksmbd_conn_lookup_dialect(conn)) {
                        pr_err("fail to verify the dialect\n");
                        return -ENOENT;
                }
        if (!ksmbd_conn_lookup_dialect(conn)) {
                pr_err("fail to verify the dialect\n");
                return -ENOENT;
        }
        return 0;
}
@@ -5499,7 +5483,6 @@ static int set_file_basic_info(struct ksmbd_file *fp,
                               struct ksmbd_share_config *share)
{
        struct iattr attrs;
        struct timespec64 ctime;
        struct file *filp;
        struct inode *inode;
        struct user_namespace *user_ns;

@@ -5521,13 +5504,11 @@ static int set_file_basic_info(struct ksmbd_file *fp,
                attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
        }

        if (file_info->ChangeTime) {
                attrs.ia_valid |= ATTR_CTIME;
        if (file_info->ChangeTime)
                attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
                ctime = attrs.ia_ctime;
                attrs.ia_valid |= ATTR_CTIME;
        } else {
                ctime = inode->i_ctime;
        }
        else
                attrs.ia_ctime = inode->i_ctime;

        if (file_info->LastWriteTime) {
                attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
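Both branches above feed attrs.ia_ctime, either from the client-supplied ChangeTime via ksmbd_NTtimeToUnix() or from the current inode ctime. SMB timestamps count 100 ns ticks since 1601-01-01 while Unix time counts seconds since 1970-01-01, so the conversion subtracts the fixed epoch offset. A userspace sketch with an illustrative helper name and stand-in timespec type:

#include <stdint.h>
#include <stdio.h>

#define NTFS_TIME_OFFSET 11644473600ULL /* seconds between 1601 and 1970 */

struct timespec64_like {
        int64_t tv_sec;
        long tv_nsec;
};

static struct timespec64_like nt_time_to_unix(uint64_t nt)
{
        struct timespec64_like ts;

        ts.tv_sec = (int64_t)(nt / 10000000ULL) - NTFS_TIME_OFFSET;
        ts.tv_nsec = (long)(nt % 10000000ULL) * 100;
        return ts;
}

int main(void)
{
        /* 2021-10-01 00:00:00 UTC expressed as NT time */
        struct timespec64_like ts =
                nt_time_to_unix(132775200000000000ULL);

        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}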
@@ -5573,11 +5554,9 @@ static int set_file_basic_info(struct ksmbd_file *fp,
                        return -EACCES;

                inode_lock(inode);
                inode->i_ctime = attrs.ia_ctime;
                attrs.ia_valid &= ~ATTR_CTIME;
                rc = notify_change(user_ns, dentry, &attrs, NULL);
                if (!rc) {
                        inode->i_ctime = ctime;
                        mark_inode_dirty(inode);
                }
                inode_unlock(inode);
        }
        return rc;

@@ -8411,20 +8390,18 @@ int smb3_decrypt_req(struct ksmbd_work *work)
        struct smb2_hdr *hdr;
        unsigned int pdu_length = get_rfc1002_len(buf);
        struct kvec iov[2];
        unsigned int buf_data_size = pdu_length + 4 -
        int buf_data_size = pdu_length + 4 -
                sizeof(struct smb2_transform_hdr);
        struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
        int rc = 0;

        if (pdu_length + 4 <
            sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {
        if (buf_data_size < sizeof(struct smb2_hdr)) {
                pr_err("Transform message is too small (%u)\n",
                       pdu_length);
                return -ECONNABORTED;
        }

        if (pdu_length + 4 <
            le32_to_cpu(tr_hdr->OriginalMessageSize) + sizeof(struct smb2_transform_hdr)) {
        if (buf_data_size < le32_to_cpu(tr_hdr->OriginalMessageSize)) {
                pr_err("Transform message is broken\n");
                return -ECONNABORTED;
        }
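The decrypt-path hunk above computes the payload size after the transform header once (buf_data_size) and validates both the minimum SMB2 header and the peer's claimed OriginalMessageSize against it. A standalone sketch under assumed struct sizes; the real ones come from sizeof on the kernel structs:

#include <stdint.h>
#include <stdio.h>

#define TRANSFORM_HDR_SIZE 52 /* assumed sizeof(struct smb2_transform_hdr) */
#define SMB2_HDR_SIZE      64 /* assumed sizeof(struct smb2_hdr) */

static int validate_transform(unsigned int pdu_length,
                              uint32_t original_message_size)
{
        long buf_data_size = (long)pdu_length + 4 - TRANSFORM_HDR_SIZE;

        if (buf_data_size < SMB2_HDR_SIZE)
                return -1;      /* transform message too small */
        if (buf_data_size < (long)original_message_size)
                return -1;      /* truncated: claim exceeds payload */
        return 0;
}

int main(void)
{
        printf("%d %d\n", validate_transform(40, 64),   /* -1: too small */
               validate_transform(4096, 1024));         /* 0: ok */
        return 0;
}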
@@ -1637,7 +1637,6 @@ struct smb2_posix_info {
} __packed;

/* functions */
int init_smb2_0_server(struct ksmbd_conn *conn);
void init_smb2_1_server(struct ksmbd_conn *conn);
void init_smb3_0_server(struct ksmbd_conn *conn);
void init_smb3_02_server(struct ksmbd_conn *conn);

@@ -21,7 +21,6 @@ static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
#define MAGIC_CHAR '~'
#define PERIOD '.'
#define mangle(V) ((char)(basechars[(V) % MANGLE_BASE]))
#define KSMBD_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr))

struct smb_protocol {
        int index;

@@ -89,7 +88,7 @@ unsigned int ksmbd_server_side_copy_max_total_size(void)

inline int ksmbd_min_protocol(void)
{
        return SMB2_PROT;
        return SMB21_PROT;
}

inline int ksmbd_max_protocol(void)

@@ -294,11 +293,6 @@ int ksmbd_init_smb_server(struct ksmbd_work *work)
        return 0;
}

bool ksmbd_pdu_size_has_room(unsigned int pdu)
{
        return (pdu >= KSMBD_MIN_SUPPORTED_HEADER_SIZE - 4);
}

int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
                                      struct ksmbd_file *dir,
                                      struct ksmbd_dir_info *d_info,
@@ -433,7 +427,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,

static int __smb2_negotiate(struct ksmbd_conn *conn)
{
        return (conn->dialect >= SMB20_PROT_ID &&
        return (conn->dialect >= SMB21_PROT_ID &&
                conn->dialect <= SMB311_PROT_ID);
}

@@ -463,7 +457,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
                }
        }

        if (command == SMB2_NEGOTIATE_HE) {
        if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
                ret = smb2_handle_negotiate(work);
                init_smb2_neg_rsp(work);
                return ret;
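With SMB 2.0 support removed, __smb2_negotiate() above now gates the SMB2 path on the 2.1 through 3.1.1 dialect range. A sketch of the gate using the on-the-wire dialect codes from the SMB2 spec:

#include <stdbool.h>
#include <stdio.h>

#define SMB20_PROT_ID  0x0202
#define SMB21_PROT_ID  0x0210
#define SMB311_PROT_ID 0x0311

static bool smb2_negotiable(int dialect)
{
        return dialect >= SMB21_PROT_ID && dialect <= SMB311_PROT_ID;
}

int main(void)
{
        printf("2.0.2: %d, 2.1: %d, 3.1.1: %d\n",
               smb2_negotiable(SMB20_PROT_ID),   /* 0: rejected */
               smb2_negotiable(SMB21_PROT_ID),   /* 1 */
               smb2_negotiable(SMB311_PROT_ID)); /* 1 */
        return 0;
}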
@@ -48,6 +48,8 @@
#define CIFS_DEFAULT_IOSIZE (64 * 1024)
#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */

#define MAX_STREAM_PROT_LEN 0x00FFFFFF

/* Responses when opening a file. */
#define F_SUPERSEDED 0
#define F_OPENED 1

@@ -493,8 +495,6 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);

int ksmbd_init_smb_server(struct ksmbd_work *work);

bool ksmbd_pdu_size_has_room(unsigned int pdu);

struct ksmbd_kstat;
int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
                                      int info_level,

@@ -82,10 +82,8 @@ cat << EOF
#define __IGNORE_truncate64
#define __IGNORE_stat64
#define __IGNORE_lstat64
#define __IGNORE_fstat64
#define __IGNORE_fcntl64
#define __IGNORE_fadvise64_64
#define __IGNORE_fstatat64
#define __IGNORE_fstatfs64
#define __IGNORE_statfs64
#define __IGNORE_llseek

@@ -253,6 +251,10 @@ cat << EOF
#define __IGNORE_getpmsg
#define __IGNORE_putpmsg
#define __IGNORE_vserver

/* 64-bit ports never needed these, and new 32-bit ports can use statx */
#define __IGNORE_fstat64
#define __IGNORE_fstatat64
EOF
}
@@ -684,7 +684,7 @@ static int elf_add_alternative(struct elf *elf,
        sec = find_section_by_name(elf, ".altinstructions");
        if (!sec) {
                sec = elf_create_section(elf, ".altinstructions",
                                         SHF_ALLOC, size, 0);
                                         SHF_ALLOC, 0, 0);

                if (!sec) {
                        WARN_ELF("elf_create_section");

@@ -292,7 +292,7 @@ static int decode_instructions(struct objtool_file *file)
                    !strcmp(sec->name, ".entry.text"))
                        sec->noinstr = true;

                for (offset = 0; offset < sec->len; offset += insn->len) {
                for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
                        insn = malloc(sizeof(*insn));
                        if (!insn) {
                                WARN("malloc failed");

@@ -307,7 +307,7 @@ static int decode_instructions(struct objtool_file *file)
                        insn->offset = offset;

                        ret = arch_decode_instruction(file->elf, sec, offset,
                                                      sec->len - offset,
                                                      sec->sh.sh_size - offset,
                                                      &insn->len, &insn->type,
                                                      &insn->immediate,
                                                      &insn->stack_ops);

@@ -349,9 +349,9 @@ static struct instruction *find_last_insn(struct objtool_file *file,
{
        struct instruction *insn = NULL;
        unsigned int offset;
        unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;
        unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;

        for (offset = sec->len - 1; offset >= end && !insn; offset--)
        for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
                insn = find_insn(file, sec, offset);

        return insn;
@@ -389,7 +389,7 @@ static int add_dead_ends(struct objtool_file *file)
                insn = find_insn(file, reloc->sym->sec, reloc->addend);
                if (insn)
                        insn = list_prev_entry(insn, list);
                else if (reloc->addend == reloc->sym->sec->len) {
                else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
                        insn = find_last_insn(file, reloc->sym->sec);
                        if (!insn) {
                                WARN("can't find unreachable insn at %s+0x%x",

@@ -424,7 +424,7 @@ static int add_dead_ends(struct objtool_file *file)
                insn = find_insn(file, reloc->sym->sec, reloc->addend);
                if (insn)
                        insn = list_prev_entry(insn, list);
                else if (reloc->addend == reloc->sym->sec->len) {
                else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
                        insn = find_last_insn(file, reloc->sym->sec);
                        if (!insn) {
                                WARN("can't find reachable insn at %s+0x%x",

@@ -1561,14 +1561,14 @@ static int read_unwind_hints(struct objtool_file *file)
                return -1;
        }

        if (sec->len % sizeof(struct unwind_hint)) {
        if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
                WARN("struct unwind_hint size mismatch");
                return -1;
        }

        file->hints = true;

        for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
        for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
                hint = (struct unwind_hint *)sec->data->d_buf + i;

                reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
@@ -286,10 +286,9 @@ static int read_sections(struct elf *elf)
                        return -1;
                }
        }
        sec->len = sec->sh.sh_size;

        if (sec->sh.sh_flags & SHF_EXECINSTR)
                elf->text_size += sec->len;
                elf->text_size += sec->sh.sh_size;

        list_add_tail(&sec->list, &elf->sections);
        elf_hash_add(section, &sec->hash, sec->idx);

@@ -734,8 +733,8 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
        data->d_size = strlen(str) + 1;
        data->d_align = 1;

        len = strtab->len;
        strtab->len += data->d_size;
        len = strtab->sh.sh_size;
        strtab->sh.sh_size += data->d_size;
        strtab->changed = true;

        return len;

@@ -790,9 +789,9 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
        data->d_align = 1;
        data->d_type = ELF_T_SYM;

        sym->idx = symtab->len / sizeof(sym->sym);
        sym->idx = symtab->sh.sh_size / sizeof(sym->sym);

        symtab->len += data->d_size;
        symtab->sh.sh_size += data->d_size;
        symtab->changed = true;

        symtab_shndx = find_section_by_name(elf, ".symtab_shndx");

@@ -814,7 +813,7 @@ struct symbol *elf_create_undef_symbol(struct elf *elf, const char *name)
                data->d_align = 4;
                data->d_type = ELF_T_WORD;

                symtab_shndx->len += 4;
                symtab_shndx->sh.sh_size += 4;
                symtab_shndx->changed = true;
        }
@@ -855,7 +854,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
        }

        sec->idx = elf_ndxscn(s);
        sec->len = size;
        sec->changed = true;

        sec->data = elf_newdata(s);

@@ -38,7 +38,6 @@ struct section {
        Elf_Data *data;
        char *name;
        int idx;
        unsigned int len;
        bool changed, text, rodata, noinstr;
};
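The objtool hunks in this area all follow from the struct section change above: the cached len field duplicated sh.sh_size and could drift out of sync with it, so callers now read the section header directly. A toy illustration of the single-source-of-truth shape, with simplified stand-in types:

#include <stdio.h>

struct shdr_like {
        unsigned long sh_size;
};

struct section_like {
        struct shdr_like sh;    /* authoritative size lives here */
        /* no separate len field left to keep in sync */
};

int main(void)
{
        struct section_like sec = { .sh = { .sh_size = 128 } };

        /* grow the section: one field to update, nothing to forget */
        sec.sh.sh_size += 64;
        printf("size = %lu\n", sec.sh.sh_size);
        return 0;
}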
@@ -204,7 +204,7 @@ int orc_create(struct objtool_file *file)

                /* Add a section terminator */
                if (!empty) {
                        orc_list_add(&orc_list, &null, sec, sec->len);
                        orc_list_add(&orc_list, &null, sec, sec->sh.sh_size);
                        nr++;
                }
        }

@@ -58,22 +58,11 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
{
}

static bool reloc2sec_off(struct reloc *reloc, struct section **sec, unsigned long *off)
static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
                             unsigned long *off)
{
        switch (reloc->sym->type) {
        case STT_FUNC:
                *sec = reloc->sym->sec;
                *off = reloc->sym->offset + reloc->addend;
                return true;

        case STT_SECTION:
                *sec = reloc->sym->sec;
                *off = reloc->addend;
                return true;

        default:
                return false;
        }
        *sec = reloc->sym->sec;
        *off = reloc->sym->offset + reloc->addend;
}

static int get_alt_entry(struct elf *elf, struct special_entry *entry,
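reloc2sec_off() above collapses into reloc_to_sec_off() because sym->offset + addend yields the right target for both cases the old switch handled: a section symbol's value is presumably 0 in the relocatable objects objtool processes, so the STT_SECTION arm was just the general formula with the offset elided. A small demonstration with stand-in types:

#include <stdio.h>

struct sym_like {
        unsigned long offset;   /* 0 for section symbols */
        const char *sec;
};

struct reloc_like {
        struct sym_like *sym;
        long addend;
};

static void reloc_to_sec_off_demo(struct reloc_like *reloc,
                                  const char **sec, unsigned long *off)
{
        *sec = reloc->sym->sec;
        *off = reloc->sym->offset + reloc->addend;
}

int main(void)
{
        struct sym_like func = { .offset = 0x40, .sec = ".text" };
        struct sym_like section = { .offset = 0, .sec = ".text" };
        struct reloc_like r1 = { .sym = &func, .addend = 8 };
        struct reloc_like r2 = { .sym = &section, .addend = 0x48 };
        const char *sec;
        unsigned long off;

        reloc_to_sec_off_demo(&r1, &sec, &off);
        printf("%s+0x%lx\n", sec, off); /* .text+0x48 */
        reloc_to_sec_off_demo(&r2, &sec, &off);
        printf("%s+0x%lx\n", sec, off); /* .text+0x48 */
        return 0;
}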
@@ -109,13 +98,8 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
                return -1;
        }
        if (!reloc2sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off)) {
                WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
                          sec, offset + entry->orig,
                          orig_reloc->sym->type,
                          orig_reloc->sym->name);
                return -1;
        }

        reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);

        if (!entry->group || alt->new_len) {
                new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);

@@ -133,13 +117,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                if (arch_is_retpoline(new_reloc->sym))
                        return 1;

                if (!reloc2sec_off(new_reloc, &alt->new_sec, &alt->new_off)) {
                        WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
                                  sec, offset + entry->new,
                                  new_reloc->sym->type,
                                  new_reloc->sym->name);
                        return -1;
                }
                reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);

                /* _ASM_EXTABLE_EX hack */
                if (alt->new_off >= 0x7ffffff0)

@@ -181,13 +159,13 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
                if (!sec)
                        continue;

                if (sec->len % entry->size != 0) {
                if (sec->sh.sh_size % entry->size != 0) {
                        WARN("%s size not a multiple of %d",
                             sec->name, entry->size);
                        return -1;
                }

                nr_entries = sec->len / entry->size;
                nr_entries = sec->sh.sh_size / entry->size;

                for (idx = 0; idx < nr_entries; idx++) {
                        alt = malloc(sizeof(*alt));