Merge 5.9-rc6 into usb-next
We want the USB fixes in here, and this resolves a merge issue in the uas driver.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 629b911153

.mailmap
@@ -169,6 +169,10 @@ Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
Kees Cook <keescook@chromium.org> <kees@outflux.net>
Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>

@@ -22,7 +22,7 @@
| nios2: | TODO |
| openrisc: | TODO |
| parisc: | TODO |
| powerpc: | ok |
| powerpc: | TODO |
| riscv: | ok |
| s390: | ok |
| sh: | TODO |

@@ -6180,7 +6180,7 @@ F: Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
F: drivers/edac/aspeed_edac.c

EDAC-BLUEFIELD
M: Shravan Kumar Ramani <sramani@nvidia.com>
M: Shravan Kumar Ramani <shravankr@nvidia.com>
S: Supported
F: drivers/edac/bluefield_edac.c

@@ -9251,7 +9251,7 @@ F: drivers/firmware/iscsi_ibft*

ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Sagi Grimberg <sagi@grimberg.me>
M: Max Gurtovoy <maxg@nvidia.com>
M: Max Gurtovoy <mgurtovoy@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org

Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Kleptomaniac Octopus

# *DOCUMENTATION*
@@ -910,8 +910,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.desc = "ARM erratum 1418040",
.capability = ARM64_WORKAROUND_1418040,
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
/*
* We need to allow affected CPUs to come in late, but
* also need the non-affected CPUs to be able to come
* in at any point in time. Wonderful.
*/
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
struct pv_time_stolen_time_region *reg;

reg = per_cpu_ptr(&stolen_time_region, cpu);
if (!reg->kaddr) {
pr_warn_once("stolen time enabled but not configured for cpu %d\n",
cpu);

/*
* paravirt_steal_clock() may be called before the CPU
* online notification callback runs. Until the callback
* has run we just return zero.
*/
if (!reg->kaddr)
return 0;
}

return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
}

static int stolen_time_dying_cpu(unsigned int cpu)
static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
struct pv_time_stolen_time_region *reg;

@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
return 0;
}

static int init_stolen_time_cpu(unsigned int cpu)
static int stolen_time_cpu_online(unsigned int cpu)
{
struct pv_time_stolen_time_region *reg;
struct arm_smccc_res res;

@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
return 0;
}

static int pv_time_init_stolen_time(void)
static int __init pv_time_init_stolen_time(void)
{
int ret;

ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
"hypervisor/arm/pvtime:starting",
init_stolen_time_cpu, stolen_time_dying_cpu);
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"hypervisor/arm/pvtime:online",
stolen_time_cpu_online,
stolen_time_cpu_down_prepare);
if (ret < 0)
return ret;
return 0;
}

static bool has_pv_steal_clock(void)
static bool __init has_pv_steal_clock(void)
{
struct arm_smccc_res res;

@@ -143,14 +143,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
}
}

static inline int bpf2a64_offset(int bpf_to, int bpf_from,
static inline int bpf2a64_offset(int bpf_insn, int off,
const struct jit_ctx *ctx)
{
int to = ctx->offset[bpf_to];
/* -1 to account for the Branch instruction */
int from = ctx->offset[bpf_from] - 1;

return to - from;
/* BPF JMP offset is relative to the next instruction */
bpf_insn++;
/*
* Whereas arm64 branch instructions encode the offset
* from the branch itself, so we must subtract 1 from the
* instruction offset.
*/
return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}

static void jit_fill_hole(void *area, unsigned int size)

@@ -642,7 +645,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,

/* JUMP off */
case BPF_JMP | BPF_JA:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
jmp_offset = bpf2a64_offset(i, off, ctx);
check_imm26(jmp_offset);
emit(A64_B(jmp_offset), ctx);
break;

@@ -669,7 +672,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_JMP32 | BPF_JSLE | BPF_X:
emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
jmp_offset = bpf2a64_offset(i, off, ctx);
check_imm19(jmp_offset);
switch (BPF_OP(code)) {
case BPF_JEQ:

@@ -908,10 +911,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
const struct bpf_prog *prog = ctx->prog;
int i;

/*
* - offset[0] offset of the end of prologue,
* start of the 1st instruction.
* - offset[1] - offset of the end of 1st instruction,
* start of the 2nd instruction
* [....]
* - offset[3] - offset of the end of 3rd instruction,
* start of 4th instruction
*/
for (i = 0; i < prog->len; i++) {
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;

if (ctx->image == NULL)
ctx->offset[i] = ctx->idx;
ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;

@@ -919,11 +933,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
ctx->offset[i] = ctx->idx;
continue;
}
if (ctx->image == NULL)
ctx->offset[i] = ctx->idx;
if (ret)
return ret;
}
/*
* offset is allocated with prog->len + 1 so fill in
* the last element with the offset after the last
* instruction (end of program)
*/
if (ctx->image == NULL)
ctx->offset[i] = ctx->idx;

return 0;
}

@@ -1002,7 +1021,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;

ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
goto out_off;

@@ -1089,7 +1108,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog->jited_len = prog_size;

if (!prog->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(prog, ctx.offset);
bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
kfree(ctx.offset);
kfree(jit_data);
@@ -74,8 +74,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#define acpi_unlazy_tlb(x)

#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
@@ -877,6 +877,7 @@ config SNI_RM
select I8253
select I8259
select ISA
select MIPS_L1_CACHE_SHIFT_6
select SWAP_IO_SPACE if CPU_BIG_ENDIAN
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
},
};

static u32 a20r_ack_hwint(void)
/*
* Trigger chipset to update CPU's CAUSE IP field
*/
static u32 a20r_update_cause_ip(void)
{
u32 status = read_c0_status();

@@ -205,12 +208,14 @@ static void a20r_hwint(void)
int irq;

clear_c0_status(IE_IRQ0);
status = a20r_ack_hwint();
status = a20r_update_cause_ip();
cause = read_c0_cause();

irq = ffs(((cause & status) >> 8) & 0xf8);
if (likely(irq > 0))
do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);

a20r_update_cause_ip();
set_c0_status(IE_IRQ0);
}

@@ -116,7 +116,6 @@ config PPC
#
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
@@ -108,7 +108,6 @@ CONFIG_FB_NVIDIA=y
CONFIG_FB_NVIDIA_I2C=y
CONFIG_FB_RADEON=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y

@@ -743,7 +743,6 @@ CONFIG_FB_TRIDENT=m
CONFIG_FB_SM501=m
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
@@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void)

extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size);
extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
if (early_radix_enabled())
return radix__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
/*
* Hash has more strict restrictions. At this point we don't
* know which translations we will pick. Hence go with hash
* restrictions.
*/
return hash__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
}
@@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
if (!tbl)
return 0;

mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
tbl->it_page_shift - 1);
mask += mask - 1;

return mask;
@@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE

# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<

@@ -111,7 +111,6 @@ SECTIONS
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
*(.glink .iplt .plt .rela*)
}
}

@@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE

# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn)
cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)

# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@

@@ -30,7 +30,7 @@ SECTIONS
. = ALIGN(16);
.text : {
*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
*(.sfpr)
*(.sfpr .glink)
} :text
PROVIDE(__etext = .);
PROVIDE(_etext = .);

@@ -111,7 +111,6 @@ SECTIONS
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
*(.glink .iplt .plt .rela*)
}
}

@@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void)
}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
/*
* We don't currently support the first MEMBLOCK not mapping 0
* physical on those processors
*/
BUG_ON(first_memblock_base != 0);

/*
* Radix mode is not limited by RMA / VRMA addressing.
*/
ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
@@ -433,9 +433,16 @@ void __init mmu_early_init_devtree(void)
if (!(mfmsr() & MSR_HV))
early_check_vec5();

if (early_radix_enabled())
if (early_radix_enabled()) {
radix__early_init_devtree();
else
/*
* We have finalized the translation we are going to use by now.
* Radix mode is not limited by RMA / VRMA addressing.
* Hence don't limit memblock allocations.
*/
ppc64_rma_size = ULONG_MAX;
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
} else
hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -822,7 +822,7 @@ static ssize_t perf_stats_show(struct device *dev,
kfree(stats);
return rc ? rc : seq_buf_used(&s);
}
DEVICE_ATTR_RO(perf_stats);
DEVICE_ATTR_ADMIN_RO(perf_stats);

static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -32,6 +32,7 @@ config RISCV
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select CLONE_BACKWARDS
select CLINT_TIMER if !MMU
select COMMON_CLK
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
@@ -95,10 +95,12 @@ sysctl: sysctl@50440000 {
#clock-cells = <1>;
};

clint0: interrupt-controller@2000000 {
clint0: clint@2000000 {
#interrupt-cells = <1>;
compatible = "riscv,clint0";
reg = <0x2000000 0xC000>;
interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
&cpu1_intc 3 &cpu1_intc 7>;
clocks = <&sysctl K210_CLK_ACLK>;
};

@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 Google, Inc
*/

#ifndef _ASM_RISCV_CLINT_H
#define _ASM_RISCV_CLINT_H

#include <linux/types.h>
#include <asm/mmio.h>

#ifdef CONFIG_RISCV_M_MODE
/*
* This lives in the CLINT driver, but is accessed directly by timex.h to avoid
* any overhead when accessing the MMIO timer.
*
* The ISA defines mtime as a 64-bit memory-mapped register that increments at
* a constant frequency, but it doesn't define some other constraints we depend
* on (most notably ordering constraints, but also some simpler stuff like the
* memory layout). Thus, this is called "clint_time_val" instead of something
* like "riscv_mtime", to signify that these non-ISA assumptions must hold.
*/
extern u64 __iomem *clint_time_val;
#endif

#endif
@@ -66,6 +66,13 @@ do { \
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
*/
#define MCOUNT_INSN_SIZE 8

#ifndef __ASSEMBLY__
struct dyn_ftrace;
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
#endif

#endif

#endif /* _ASM_RISCV_FTRACE_H */
@@ -10,6 +10,31 @@

typedef unsigned long cycles_t;

#ifdef CONFIG_RISCV_M_MODE

#include <asm/clint.h>

#ifdef CONFIG_64BIT
static inline cycles_t get_cycles(void)
{
return readq_relaxed(clint_time_val);
}
#else /* !CONFIG_64BIT */
static inline u32 get_cycles(void)
{
return readl_relaxed(((u32 *)clint_time_val));
}
#define get_cycles get_cycles

static inline u32 get_cycles_hi(void)
{
return readl_relaxed(((u32 *)clint_time_val) + 1);
}
#define get_cycles_hi get_cycles_hi
#endif /* CONFIG_64BIT */

#else /* CONFIG_RISCV_M_MODE */

static inline cycles_t get_cycles(void)
{
return csr_read(CSR_TIME);

@@ -41,6 +66,8 @@ static inline u64 get_cycles64(void)
}
#endif /* CONFIG_64BIT */

#endif /* !CONFIG_RISCV_M_MODE */

#define ARCH_HAS_READ_CURRENT_TIMER
static inline int read_current_timer(unsigned long *timer_val)
{
@@ -97,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return __ftrace_modify_call(rec->ip, addr, false);
}


/*
* This is called early on, and isn't wrapped by
* ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
* text_mutex, which triggers a lockdep failure. SMP isn't running so we could
* just directly poke the text, but it's simpler to just take the lock
* ourselves.
*/
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
int out;

ftrace_arch_code_modify_prepare();
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
ftrace_arch_code_modify_post_process();

return out;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
@@ -226,12 +226,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)

ptep = &fixmap_pte[pte_index(addr)];

if (pgprot_val(prot)) {
if (pgprot_val(prot))
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
} else {
else
pte_clear(&init_mm, addr, ptep);
local_flush_tlb_page(addr);
}
local_flush_tlb_page(addr);
}

static pte_t *__init get_pte_virt(phys_addr_t pa)
@@ -26,6 +26,7 @@ void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
void do_secure_storage_access(struct pt_regs *regs);
void do_non_secure_storage_access(struct pt_regs *regs);
void do_secure_storage_violation(struct pt_regs *regs);

void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);
@@ -39,14 +39,13 @@ void enabled_wait(void)
local_irq_restore(flags);

/* Account time spent with enabled wait psw loaded as idle time. */
/* XXX seqcount has tracepoints that require RCU */
write_seqcount_begin(&idle->seqcount);
raw_write_seqcount_begin(&idle->seqcount);
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(cputime_to_nsecs(idle_time));
write_seqcount_end(&idle->seqcount);
raw_write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);

@@ -80,7 +80,7 @@ PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK(do_secure_storage_access) /* 3d */
PGM_CHECK(do_non_secure_storage_access) /* 3e */
PGM_CHECK_DEFAULT /* 3f */
PGM_CHECK(do_secure_storage_violation) /* 3f */
PGM_CHECK(monitor_event_exception) /* 40 */
PGM_CHECK_DEFAULT /* 41 */
PGM_CHECK_DEFAULT /* 42 */
@@ -619,7 +619,7 @@ static struct notifier_block kdump_mem_nb = {
/*
* Make sure that the area behind memory_end is protected
*/
static void reserve_memory_end(void)
static void __init reserve_memory_end(void)
{
if (memory_end_set)
memblock_reserve(memory_end, ULONG_MAX);

@@ -628,7 +628,7 @@ static void reserve_memory_end(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
static void reserve_oldmem(void)
static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)

@@ -640,7 +640,7 @@ static void reserve_oldmem(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
static void remove_oldmem(void)
static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
@@ -859,6 +859,21 @@ void do_non_secure_storage_access(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
/*
* Either KVM messed up the secure guest mapping or the same
* page is mapped into multiple secure guests.
*
* This exception is only triggered when a guest 2 is running
* and can therefore never occur in kernel context.
*/
printk_ratelimited(KERN_WARNING
"Secure storage violation in task: %s, pid %d\n",
current->comm, current->pid);
send_sig(SIGSEGV, current, 0);
}

#else
void do_secure_storage_access(struct pt_regs *regs)
{

@@ -869,4 +884,9 @@ void do_non_secure_storage_access(struct pt_regs *regs)
{
default_trap_handler(regs);
}

void do_secure_storage_violation(struct pt_regs *regs)
{
default_trap_handler(regs);
}
#endif
@@ -668,6 +668,10 @@ EXPORT_SYMBOL_GPL(zpci_enable_device);
int zpci_disable_device(struct zpci_dev *zdev)
{
zpci_dma_exit_device(zdev);
/*
* The zPCI function may already be disabled by the platform, this is
* detected in clp_disable_fh() which becomes a no-op.
*/
return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

@@ -143,6 +143,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
zpci_remove_device(zdev);
}

zdev->fh = ccdf->fh;
zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
if (!clp_get_state(ccdf->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
@@ -8,7 +8,6 @@

#ifdef CONFIG_SMP

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/percpu.h>
@@ -370,7 +370,6 @@ syscall_trace_entry:
nop
cmp/eq #-1, r0
bt syscall_exit
mov.l r0, @(OFF_R0,r15) ! Save return value
! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
! ptrace(POKEUSR). (Note that R0-R2 are
@@ -455,16 +455,11 @@ long arch_ptrace(struct task_struct *child, long request,

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;

if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->regs[0].
*/
ret = -1L;
tracehook_report_syscall_entry(regs)) {
regs->regs[0] = -ENOSYS;
return -1;
}

if (secure_computing() == -1)
return -1;

@@ -475,7 +470,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);

return ret ?: regs->regs[0];
return 0;
}

asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
@@ -43,6 +43,8 @@ KBUILD_CFLAGS += -Wno-pointer-sign
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
# Disable relocation relaxation in case the link is not PIE.
KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)

KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
@@ -19,6 +19,7 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
# CONFIG_64BIT is not set
CONFIG_SMP=y
CONFIG_X86_GENERIC=y
CONFIG_HPET_TIMER=y

@@ -186,7 +187,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set

@@ -181,7 +181,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -159,8 +159,6 @@ static inline u64 x86_default_get_root_pointer(void)
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */

#define acpi_unlazy_tlb(x) leave_mm(x)

#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
@@ -60,12 +60,26 @@
#define FRAME_END "pop %" _ASM_BP "\n"

#ifdef CONFIG_X86_64

#define ENCODE_FRAME_POINTER \
"lea 1(%rsp), %rbp\n\t"

static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
{
return (unsigned long)regs + 1;
}

#else /* !CONFIG_X86_64 */

#define ENCODE_FRAME_POINTER \
"movl %esp, %ebp\n\t" \
"andl $0x7fffffff, %ebp\n\t"

static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
{
return (unsigned long)regs & 0x7fffffff;
}

#endif /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */

@@ -83,6 +97,11 @@

#define ENCODE_FRAME_POINTER

static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
{
return 0;
}

#endif

#define FRAME_BEGIN
@@ -42,6 +42,7 @@
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>

#include "process.h"

@@ -133,7 +134,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;

frame->bp = 0;
frame->bp = encode_frame_pointer(childregs);
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
p->thread.io_bitmap = NULL;
@@ -161,18 +161,10 @@ static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
int state = cx - pr->power.states;

if (state >= pr->power.timer_broadcast_on_state) {
if (broadcast)
tick_broadcast_enter();
else
tick_broadcast_exit();
}
return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

@@ -180,9 +172,9 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
}

@@ -566,32 +558,43 @@ static DEFINE_RAW_SPINLOCK(c3_lock);

/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
* @drv: cpuidle driver
* @pr: Target processor
* @cx: Target state context
* @timer_bc: Whether or not to change timer mode to broadcast
* @index: index of target state
*/
static void acpi_idle_enter_bm(struct acpi_processor *pr,
struct acpi_processor_cx *cx, bool timer_bc)
static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int index)
{
acpi_unlazy_tlb(smp_processor_id());

/*
* Must be done before busmaster disable as we might need to
* access HPET !
*/
if (timer_bc)
lapic_timer_state_broadcast(pr, cx, 1);
static struct acpi_processor_cx safe_cx = {
.entry_method = ACPI_CSTATE_HALT,
};

/*
* disable bus master
* bm_check implies we need ARB_DIS
* bm_control implies whether we can do ARB_DIS
*
* That leaves a case where bm_check is set and bm_control is
* not set. In that case we cannot do much, we enter C3
* without doing anything.
* That leaves a case where bm_check is set and bm_control is not set.
* In that case we cannot do much, we enter C3 without doing anything.
*/
if (pr->flags.bm_control) {
bool dis_bm = pr->flags.bm_control;

/* If we can skip BM, demote to a safe state. */
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
dis_bm = false;
index = drv->safe_state_index;
if (index >= 0) {
cx = this_cpu_read(acpi_cstate[index]);
} else {
cx = &safe_cx;
index = -EBUSY;
}
}

if (dis_bm) {
raw_spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */

@@ -600,18 +603,21 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
raw_spin_unlock(&c3_lock);
}

rcu_idle_enter();

acpi_idle_do_entry(cx);

rcu_idle_exit();

/* Re-enable bus master arbitration */
if (pr->flags.bm_control) {
if (dis_bm) {
raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}

if (timer_bc)
lapic_timer_state_broadcast(pr, cx, 0);
return index;
}

static int acpi_idle_enter(struct cpuidle_device *dev,

@@ -625,32 +631,21 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return -EINVAL;

if (cx->type != ACPI_STATE_C1) {
if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
return acpi_idle_enter_bm(drv, pr, cx, index);

/* C2 to C1 demotion. */
if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
index = ACPI_IDLE_STATE_START;
cx = per_cpu(acpi_cstate[index], dev->cpu);
} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
acpi_idle_enter_bm(pr, cx, true);
return index;
} else if (drv->safe_state_index >= 0) {
index = drv->safe_state_index;
cx = per_cpu(acpi_cstate[index], dev->cpu);
} else {
acpi_safe_halt();
return -EBUSY;
}
}
}

lapic_timer_state_broadcast(pr, cx, 1);

if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();

acpi_idle_do_entry(cx);

lapic_timer_state_broadcast(pr, cx, 0);

return index;
}

@@ -666,7 +661,13 @@ static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
return 0;

if (pr->flags.bm_check) {
acpi_idle_enter_bm(pr, cx, false);
u8 bm_sts_skip = cx->bm_sts_skip;

/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
cx->bm_sts_skip = 1;
acpi_idle_enter_bm(drv, pr, cx, index);
cx->bm_sts_skip = bm_sts_skip;

return 0;
} else {
ACPI_FLUSH_CPU_CACHE();

@@ -682,11 +683,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
{
int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;

if (max_cstate == 0)
max_cstate = 1;

for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
state = &acpi_idle_driver.states[count];
cx = &pr->power.states[i];

if (!cx->valid)

@@ -694,6 +697,15 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,

per_cpu(acpi_cstate[count], dev->cpu) = cx;

if (lapic_timer_needs_broadcast(pr, cx))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;

if (cx->type == ACPI_STATE_C3) {
state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
if (pr->flags.bm_check)
state->flags |= CPUIDLE_FLAG_RCU_IDLE;
}

count++;
if (count == CPUIDLE_STATE_MAX)
break;
@@ -5,6 +5,7 @@ config CLK_BCM2711_DVP
depends on ARCH_BCM2835 || COMPILE_TEST
depends on COMMON_CLK
default ARCH_BCM2835
select RESET_CONTROLLER
select RESET_SIMPLE
help
Enable common clock framework support for the Broadcom BCM2711
@@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev,
parent_name = postdiv_name;
}

pllen = kzalloc(sizeof(*pllout), GFP_KERNEL);
pllen = kzalloc(sizeof(*pllen), GFP_KERNEL);
if (!pllen) {
ret = -ENOMEM;
goto err_unregister_postdiv;
@@ -420,17 +420,18 @@ static int lpass_core_sc7180_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_clk_create(&pdev->dev);
if (ret)
return ret;
goto disable_pm_runtime;

ret = pm_clk_add(&pdev->dev, "iface");
if (ret < 0) {
dev_err(&pdev->dev, "failed to acquire iface clock\n");
goto disable_pm_runtime;
goto destroy_pm_clk;
}

ret = -EINVAL;
clk_probe = of_device_get_match_data(&pdev->dev);
if (!clk_probe)
return -EINVAL;
goto destroy_pm_clk;

ret = clk_probe(pdev);
if (ret)
@@ -137,7 +137,7 @@ PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
PNAME(mux_hdmiphy_p) = { "hdmiphy_phy", "xin24m" };
PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" };

PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" };
PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy", "usb480m" };
PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "hdmiphy" };
PNAME(mux_pll_src_2plls_p) = { "cpll", "gpll" };
PNAME(mux_sclk_hdmi_cec_p) = { "cpll", "gpll", "xin24m" };
@@ -109,8 +109,10 @@ static int integrator_impd1_clk_probe(struct platform_device *pdev)

for_each_available_child_of_node(np, child) {
ret = integrator_impd1_clk_spawn(dev, np, child);
if (ret)
if (ret) {
of_node_put(child);
break;
}
}

return ret;
@@ -19,6 +19,11 @@
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/smp.h>
#include <linux/timex.h>

#ifndef CONFIG_RISCV_M_MODE
#include <asm/clint.h>
#endif

#define CLINT_IPI_OFF 0
#define CLINT_TIMER_CMP_OFF 0x4000

@@ -31,6 +36,10 @@ static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
static unsigned int clint_timer_irq;

#ifdef CONFIG_RISCV_M_MODE
u64 __iomem *clint_time_val;
#endif

static void clint_send_ipi(const struct cpumask *target)
{
unsigned int cpu;

@@ -184,6 +193,14 @@ static int __init clint_timer_init_dt(struct device_node *np)
clint_timer_val = base + CLINT_TIMER_VAL_OFF;
clint_timer_freq = riscv_timebase;

#ifdef CONFIG_RISCV_M_MODE
/*
* Yes, that's an odd naming scheme. time_val is public, but hopefully
* will die in favor of something cleaner.
*/
clint_time_val = clint_timer_val;
#endif

pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);

rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
@@ -361,7 +361,10 @@ static void __init fixup_cede0_latency(void)
for (i = 0; i < nr_xcede_records; i++) {
struct xcede_latency_record *record = &payload->records[i];
u64 latency_tb = be64_to_cpu(record->latency_ticks);
u64 latency_us = tb_to_ns(latency_tb) / NSEC_PER_USEC;
u64 latency_us = DIV_ROUND_UP_ULL(tb_to_ns(latency_tb), NSEC_PER_USEC);

if (latency_us == 0)
pr_warn("cpuidle: xcede record %d has an unrealistic latency of 0us.\n", i);

if (latency_us < min_latency_us)
min_latency_us = latency_us;

@@ -378,10 +381,14 @@ static void __init fixup_cede0_latency(void)
* Perform the fix-up.
*/
if (min_latency_us < dedicated_states[1].exit_latency) {
u64 cede0_latency = min_latency_us - 1;
/*
* We set a minimum of 1us wakeup latency for cede0 to
* distinguish it from snooze
*/
u64 cede0_latency = 1;

if (cede0_latency <= 0)
cede0_latency = min_latency_us;
if (min_latency_us > cede0_latency)
cede0_latency = min_latency_us - 1;

dedicated_states[1].exit_latency = cede0_latency;
dedicated_states[1].target_residency = 10 * (cede0_latency);
@@ -138,6 +138,7 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
ktime_t time_start, time_end;
struct cpuidle_state *target_state = &drv->states[index];

time_start = ns_to_ktime(local_clock());

@@ -153,8 +154,9 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
rcu_idle_enter();
drv->states[index].enter_s2idle(dev, drv, index);
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
rcu_idle_enter();
target_state->enter_s2idle(dev, drv, index);
if (WARN_ON_ONCE(!irqs_disabled()))
local_irq_disable();
/*

@@ -162,7 +164,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* first CPU executing it calls functions containing RCU read-side
* critical sections, so tell RCU about that.
*/
rcu_idle_exit();
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
rcu_idle_exit();
tick_unfreeze();
start_critical_timings();

@@ -239,9 +242,11 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_start = ns_to_ktime(local_clock());

stop_critical_timings();
rcu_idle_enter();
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
rcu_idle_enter();
entered_state = target_state->enter(dev, drv, index);
rcu_idle_exit();
if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
rcu_idle_exit();
start_critical_timings();

sched_clock_idle_wakeup_event();
@@ -85,6 +85,12 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}

if (!dax_dev) {
pr_debug("%s: error: dax unsupported by block device\n",
bdevname(bdev, buf));
return false;
}

err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
if (err) {
pr_info("%s: error: unaligned partition for dax\n",

@@ -100,12 +106,6 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}

if (!dax_dev || !bdev_dax_supported(bdev, blocksize)) {
pr_debug("%s: error: dax unsupported by block device\n",
bdevname(bdev, buf));
return false;
}

id = dax_read_lock();
len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

@@ -325,11 +325,15 @@ EXPORT_SYMBOL_GPL(dax_direct_access);
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
int blocksize, sector_t start, sector_t len)
{
if (!dax_dev)
return false;

if (!dax_alive(dax_dev))
return false;

return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}
EXPORT_SYMBOL_GPL(dax_supported);

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
@@ -508,6 +508,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (!force_load && idx < 0)
return -ENODEV;
} else {
force_load = true;
idx = 0;
}

@@ -629,9 +630,13 @@ void ghes_edac_unregister(struct ghes *ghes)
struct mem_ctl_info *mci;
unsigned long flags;

if (!force_load)
return;

mutex_lock(&ghes_reg_mutex);

system_scanned = false;
memset(&ghes_hw, 0, sizeof(struct ghes_hw_desc));

if (!refcount_dec_and_test(&ghes_refcount))
goto unlock;
@@ -84,7 +84,7 @@ static int __init efibc_init(void)
{
int ret;

if (!efi_enabled(EFI_RUNTIME_SERVICES))
if (!efivars_kobject() || !efivar_supports_writes())
return -ENODEV;

ret = register_reboot_notifier(&efibc_reboot_notifier);
@@ -178,7 +178,7 @@ static int psp_sw_init(void *handle)
return ret;
}

if (adev->asic_type == CHIP_NAVI10) {
if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
ret = psp_sysfs_init(adev);
if (ret) {
return ret;
@@ -58,7 +58,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");

/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -1216,6 +1216,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
dqm->sched_running = false;
dqm_unlock(dqm);

pm_release_ib(&dqm->packets);

kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets, hanging);

@@ -1326,7 +1328,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (q->properties.is_active) {
increment_queue_count(dqm, q->properties.type);

retval = execute_queues_cpsch(dqm,
execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}

@@ -5278,19 +5278,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
struct drm_device *dev = new_crtc_state->crtc->dev;
struct drm_plane *plane;

drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return true;
}

return false;
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
struct drm_atomic_state *state = new_crtc_state->state;

@@ -5354,19 +5341,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return ret;
}

/*
* We require the primary plane to be enabled whenever the CRTC is, otherwise
* drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
* planes are disabled, which is not supported by the hardware. And there is legacy
* userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
*/
if (state->enable &&
!(state->plane_mask & drm_plane_mask(crtc->primary)))
return -EINVAL;

/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;

/*
* We want at least one hardware plane enabled to use
* the stream with a cursor enabled.
*/
if (state->enable && state->active &&
does_crtc_have_active_cursor(state) &&
dm_crtc_state->active_planes == 0)
return -EINVAL;

if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
return 0;

@@ -409,8 +409,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
},
},
.num_states = 5,
.sr_exit_time_us = 8.6,
.sr_enter_plus_exit_time_us = 10.9,
.sr_exit_time_us = 11.6,
.sr_enter_plus_exit_time_us = 13.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -27,7 +27,7 @@
#define MOD_HDCP_LOG_H_

#ifdef CONFIG_DRM_AMD_DC_HDCP
#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
@@ -88,7 +88,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;

if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
@@ -1126,7 +1126,7 @@ static int smu_disable_dpms(struct smu_context *smu)
*/
if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) &&
(adev->asic_type <= CHIP_NAVI12))
(adev->asic_type <= CHIP_NAVY_FLOUNDER))
return 0;

/*

@@ -1211,7 +1211,9 @@ static int smu_hw_fini(void *handle)
int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int ret;

amdgpu_gfx_off_ctrl(smu->adev, false);

ret = smu_hw_fini(adev);
if (ret)

@@ -1222,8 +1224,12 @@ int smu_reset(struct smu_context *smu)
return ret;

ret = smu_late_init(adev);
if (ret)
return ret;

return ret;
amdgpu_gfx_off_ctrl(smu->adev, true);

return 0;
}

static int smu_suspend(void *handle)
@@ -439,29 +439,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
return __reset_engine(engine);
}

static struct intel_engine_cs *__active_engine(struct i915_request *rq)
static bool
__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
{
struct intel_engine_cs *engine, *locked;
bool ret = false;

/*
* Serialise with __i915_request_submit() so that it sees
* is-banned?, or we know the request is already inflight.
*
* Note that rq->engine is unstable, and so we double
* check that we have acquired the lock on the final engine.
*/
locked = READ_ONCE(rq->engine);
spin_lock_irq(&locked->active.lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
spin_unlock(&locked->active.lock);
spin_lock(&engine->active.lock);
locked = engine;
spin_lock(&locked->active.lock);
}

engine = NULL;
if (i915_request_is_active(rq) && rq->fence.error != -EIO)
engine = rq->engine;
if (!i915_request_completed(rq)) {
if (i915_request_is_active(rq) && rq->fence.error != -EIO)
*active = locked;
ret = true;
}

spin_unlock_irq(&locked->active.lock);

return engine;
return ret;
}

static struct intel_engine_cs *active_engine(struct intel_context *ce)

@@ -472,17 +479,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
if (!ce->timeline)
return NULL;

mutex_lock(&ce->timeline->mutex);
list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
if (i915_request_completed(rq))
break;
rcu_read_lock();
list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
if (i915_request_is_active(rq) && i915_request_completed(rq))
continue;

/* Check with the backend if the request is inflight */
engine = __active_engine(rq);
if (engine)
if (__active_engine(rq, &engine))
break;
}
mutex_unlock(&ce->timeline->mutex);
rcu_read_unlock();

return engine;
}

@@ -713,6 +719,7 @@ __create_context(struct drm_i915_private *i915)
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->link);

spin_lock_init(&ctx->stale.lock);
INIT_LIST_HEAD(&ctx->stale.engines);

@@ -740,10 +747,6 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);

return ctx;

err_free:

@@ -931,6 +934,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv,
u32 *id)
{
struct drm_i915_private *i915 = ctx->i915;
struct i915_address_space *vm;
int ret;

@@ -949,8 +953,16 @@ static int gem_context_register(struct i915_gem_context *ctx,
/* And finally expose ourselves to userspace via the idr */
ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
if (ret)
put_pid(fetch_and_zero(&ctx->pid));
goto err_pid;

spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);

return 0;

err_pid:
put_pid(fetch_and_zero(&ctx->pid));
return ret;
}

@@ -2060,6 +2060,14 @@ static inline void clear_ports(struct i915_request **ports, int count)
memset_p((void **)ports, NULL, count);
}

static inline void
copy_ports(struct i915_request **dst, struct i915_request **src, int count)
{
/* A memcpy_p() would be very useful here! */
while (count--)
WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
}

static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;

@@ -2648,10 +2656,9 @@ static void process_csb(struct intel_engine_cs *engine)

/* switch pending to inflight */
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
memcpy(execlists->inflight,
execlists->pending,
execlists_num_ports(execlists) *
sizeof(*execlists->pending));
copy_ports(execlists->inflight,
execlists->pending,
execlists_num_ports(execlists));
smp_wmb(); /* complete the seqlock */
WRITE_ONCE(execlists->active, execlists->inflight);

@@ -388,17 +388,38 @@ static bool __request_in_flight(const struct i915_request *signal)
* As we know that there are always preemption points between
* requests, we know that only the currently executing request
* may be still active even though we have cleared the flag.
* However, we can't rely on our tracking of ELSP[0] to known
* However, we can't rely on our tracking of ELSP[0] to know
* which request is currently active and so maybe stuck, as
* the tracking maybe an event behind. Instead assume that
* if the context is still inflight, then it is still active
* even if the active flag has been cleared.
*
* To further complicate matters, if there a pending promotion, the HW
* may either perform a context switch to the second inflight execlists,
* or it may switch to the pending set of execlists. In the case of the
* latter, it may send the ACK and we process the event copying the
* pending[] over top of inflight[], _overwriting_ our *active. Since
* this implies the HW is arbitrating and not struck in *active, we do
* not worry about complete accuracy, but we do require no read/write
* tearing of the pointer [the read of the pointer must be valid, even
* as the array is being overwritten, for which we require the writes
* to avoid tearing.]
*
* Note that the read of *execlists->active may race with the promotion
* of execlists->pending[] to execlists->inflight[], overwritting
* the value at *execlists->active. This is fine. The promotion implies
* that we received an ACK from the HW, and so the context is not
* stuck -- if we do not see ourselves in *active, the inflight status
* is valid. If instead we see ourselves being copied into *active,
* we are inflight and may signal the callback.
*/
if (!intel_context_inflight(signal->context))
return false;

rcu_read_lock();
for (port = __engine_active(signal->engine); (rq = *port); port++) {
for (port = __engine_active(signal->engine);
(rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
port++) {
if (rq->context == signal->context) {
inflight = i915_seqno_passed(rq->fence.seqno,
signal->fence.seqno);
@@ -164,9 +164,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,

	do {
		list_for_each_entry_safe(pos, next, &x->head, entry) {
			pos->func(pos,
				  TASK_NORMAL, fence->error,
				  &extra);
			int wake_flags;

			wake_flags = fence->error;
			if (pos->func == autoremove_wake_function)
				wake_flags = 0;

			pos->func(pos, TASK_NORMAL, wake_flags, &extra);
		}

		if (list_empty(&extra))

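The subtlety in this fix, as I read it: the third argument of a wait-queue callback is a wake-flags word that stock callbacks such as autoremove_wake_function() forward to try_to_wake_up(), where only 0 or WF_SYNC are meaningful. i915's own callbacks reuse that slot to carry the fence error, so the dispatch loop now has to hand each callback the value it actually expects. A sketch of the rule in isolation (not the full i915_sw_fence code):

#include <linux/sched.h>
#include <linux/wait.h>

/* Hand i915-style callbacks the fence error through the flags slot,
 * but give the stock autoremove_wake_function() a real wake_flags
 * value (0), since it forwards the argument to try_to_wake_up(). */
static void wake_one(struct wait_queue_entry *pos, int error, void *key)
{
	int wake_flags = error;

	if (pos->func == autoremove_wake_function)
		wake_flags = 0;

	pos->func(pos, TASK_NORMAL, wake_flags, key);
}
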
@@ -831,13 +831,19 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client = NULL;
	}
	ret = of_property_read_u32_index(priv->mutex_node,
					 "mediatek,gce-events",
					 drm_crtc_index(&mtk_crtc->base),
					 &mtk_crtc->cmdq_event);
	if (ret)
		dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
			drm_crtc_index(&mtk_crtc->base));

	if (mtk_crtc->cmdq_client) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 drm_crtc_index(&mtk_crtc->base),
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			cmdq_mbox_destroy(mtk_crtc->cmdq_client);
			mtk_crtc->cmdq_client = NULL;
		}
	}
#endif
	return 0;
}

@@ -496,6 +496,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (of_address_to_resource(node, 0, &res) != 0) {
		dev_err(dev, "Missing reg in %s node\n", node->full_name);
		put_device(&larb_pdev->dev);
		return -EINVAL;
	}
	comp->regs_pa = res.start;

@@ -27,7 +27,6 @@

#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"

@@ -165,7 +164,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)

	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;
		goto put_mutex_dev;

	drm->mode_config.min_width = 64;
	drm->mode_config.min_height = 64;

@@ -182,7 +181,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)

	ret = component_bind_all(drm->dev, drm);
	if (ret)
		return ret;
		goto put_mutex_dev;

	/*
	 * We currently support two fixed data streams, each optional,

@@ -229,7 +228,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
	}
	if (!dma_dev->dma_parms) {
		ret = -ENOMEM;
		goto err_component_unbind;
		goto put_dma_dev;
	}

	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));

@@ -256,9 +255,12 @@ static int mtk_drm_kms_init(struct drm_device *drm)
err_unset_dma_parms:
	if (private->dma_parms_allocated)
		dma_dev->dma_parms = NULL;
put_dma_dev:
	put_device(private->dma_dev);
err_component_unbind:
	component_unbind_all(drm->dev, drm);

put_mutex_dev:
	put_device(private->mutex_dev);
	return ret;
}

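The three mtk_drm_kms_init() hunks above are all the same fix: early returns that leaked references become jumps into an unwind ladder. The canonical shape of that pattern, with generic placeholder names (acquire_a/release_a and friends are illustrative, not the mtk_drm functions):

struct ctx;
int acquire_a(struct ctx *c);
int acquire_b(struct ctx *c);
int acquire_c(struct ctx *c);
void release_a(struct ctx *c);
void release_b(struct ctx *c);

/* Acquire in order; on failure, fall through the labels that undo
 * exactly what was acquired so far, in reverse order. */
int init_all(struct ctx *c)
{
	int ret;

	ret = acquire_a(c);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = acquire_b(c);
	if (ret)
		goto put_a;

	ret = acquire_c(c);
	if (ret)
		goto put_b;

	return 0;

put_b:
	release_b(c);
put_a:
	release_a(c);
	return ret;
}

The key property is that each label releases only resources acquired before the failing step, so no path double-frees and no path leaks.
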
@@ -544,8 +546,13 @@ static int mtk_drm_probe(struct platform_device *pdev)
	pm_runtime_disable(dev);
err_node:
	of_node_put(private->mutex_node);
	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
		of_node_put(private->comp_node[i]);
		if (private->ddp_comp[i]) {
			put_device(private->ddp_comp[i]->larb_dev);
			private->ddp_comp[i] = NULL;
		}
	}
	return ret;
}

@@ -466,14 +466,13 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		horizontal_backporch_byte =
			(vm->hback_porch * dsi_tmp_buf_bpp - 10);
		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
	else
		horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
			dsi_tmp_buf_bpp - 10);
		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
					    dsi_tmp_buf_bpp;

	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
			  timing->da_hs_zero + timing->da_hs_exit + 3;
			  timing->da_hs_zero + timing->da_hs_exit;

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >

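To see what this does to the programmed value, take made-up timings (hback_porch = 40, hsync_len = 10, 24-bit pixels so dsi_tmp_buf_bpp = 3): the old non-sync-pulse formula subtracted a 10-byte fudge that the patch removes, and the PHY cycle budget likewise drops its "+ 3". A throwaway check of the back-porch arithmetic:

#include <stdio.h>

int main(void)
{
	/* illustrative timing values, not a real panel mode */
	unsigned int hback_porch = 40, hsync_len = 10, bpp = 3;

	unsigned int old_hbp = (hback_porch + hsync_len) * bpp - 10; /* 140 */
	unsigned int new_hbp = (hback_porch + hsync_len) * bpp;      /* 150 */

	printf("old=%u new=%u\n", old_hbp, new_hbp);
	return 0;
}
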
@@ -1507,25 +1507,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
		dev_err(dev,
			"Failed to get system configuration registers: %d\n",
			ret);
		return ret;
		goto put_device;
	}
	hdmi->sys_regmap = regmap;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdmi->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(hdmi->regs))
		return PTR_ERR(hdmi->regs);
	if (IS_ERR(hdmi->regs)) {
		ret = PTR_ERR(hdmi->regs);
		goto put_device;
	}

	remote = of_graph_get_remote_node(np, 1, 0);
	if (!remote)
		return -EINVAL;
	if (!remote) {
		ret = -EINVAL;
		goto put_device;
	}

	if (!of_device_is_compatible(remote, "hdmi-connector")) {
		hdmi->next_bridge = of_drm_find_bridge(remote);
		if (!hdmi->next_bridge) {
			dev_err(dev, "Waiting for external bridge\n");
			of_node_put(remote);
			return -EPROBE_DEFER;
			ret = -EPROBE_DEFER;
			goto put_device;
		}
	}

@@ -1534,7 +1539,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
		dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
			remote);
		of_node_put(remote);
		return -EINVAL;
		ret = -EINVAL;
		goto put_device;
	}
	of_node_put(remote);

@@ -1542,10 +1548,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
	of_node_put(i2c_np);
	if (!hdmi->ddc_adpt) {
		dev_err(dev, "Failed to get ddc i2c adapter by node\n");
		return -EINVAL;
		ret = -EINVAL;
		goto put_device;
	}

	return 0;
put_device:
	put_device(hdmi->cec_dev);
	return ret;
}

/*

@@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,

	/* get matching reference and feedback divider */
	*ref_div = min(max(den/post_div, 1u), ref_div_max);
	*fb_div = max(nom * *ref_div * post_div / den, 1u);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {

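The functional change is truncation versus round-to-nearest. For non-negative arguments, the kernel's DIV_ROUND_CLOSEST(n, d) behaves like (n + d/2) / d, so a quotient with fractional part of 0.5 or more now rounds up instead of down, keeping the synthesized PLL frequency closer to the target. A quick standalone check, with divider values invented for illustration:

#include <stdio.h>

/* round-to-nearest for non-negative n and positive d; the real
 * kernel macro also handles signed operands */
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	unsigned int nom = 27000, ref_div = 2, post_div = 4, den = 60000;
	unsigned int n = nom * ref_div * post_div;	/* 216000, i.e. 3.6 * den */

	printf("truncated fb_div = %u\n", n / den);                    /* 3 */
	printf("closest   fb_div = %u\n", DIV_ROUND_CLOSEST(n, den)); /* 4 */
	return 0;
}
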
@@ -731,7 +731,7 @@ static void vmbus_wait_for_unload(void)
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;
	u32 message_type, i;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was

@@ -741,8 +741,11 @@ static void vmbus_wait_for_unload(void)
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 *
	 * Wait no more than 10 seconds so that the panic path can't get
	 * hung forever in case the response message isn't seen.
	 */
	while (1) {
	for (i = 0; i < 1000; i++) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

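For context, the rest of the loop body (not shown in this hunk) polls once per pass with a short delay, which is where the advertised 10 second cap comes from. A sketch of the shape, assuming a 10 ms delay per iteration:

/* bounded poll: bail out after ~10 s instead of spinning forever */
for (i = 0; i < 1000; i++) {
	if (completion_done(&vmbus_connection.unload_event))
		break;

	/* ... scan each CPU's message page for the response ... */
	mdelay(10);	/* 1000 iterations * 10 ms == the 10 s budget */
}
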
@@ -2382,7 +2382,10 @@ static int vmbus_bus_suspend(struct device *dev)
	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
		wait_for_completion(&vmbus_connection.ready_for_suspend_event);

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
		pr_err("Can not suspend due to a previous failed resuming\n");
		return -EBUSY;
	}

	mutex_lock(&vmbus_connection.channel_mutex);

@@ -2456,7 +2459,9 @@ static int vmbus_bus_resume(struct device *dev)

	vmbus_request_offers();

	wait_for_completion(&vmbus_connection.ready_for_resume_event);
	if (wait_for_completion_timeout(
		&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
		pr_err("Some vmbus device is missing after suspending?\n");

	/* Reset the event for the next suspend. */
	reinit_completion(&vmbus_connection.ready_for_suspend_event);

@@ -69,6 +69,7 @@
 * These share bit definitions, so use the same values for the enable &
 * status bits.
 */
#define ASPEED_I2CD_INTR_RECV_MASK			0xf000ffff
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT			BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE		BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH			BIT(7)

@@ -604,6 +605,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
	       bus->base + ASPEED_I2C_INTR_STS_REG);
	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
	irq_remaining = irq_received;

#if IS_ENABLED(CONFIG_I2C_SLAVE)

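The added readl() back from the status register is the usual cure for posted writes: the writel() that acknowledges the handled interrupt bits may still be sitting in a write buffer when the CPU moves on, so the interrupt can appear to re-fire. Reading the same register back forces the write to reach the device first. As a generic pattern (a sketch, not a helper this driver actually defines):

#include <linux/io.h>

static inline void writel_flush(u32 val, void __iomem *reg)
{
	writel(val, reg);	/* may be posted/buffered on the bus */
	(void)readl(reg);	/* read-back orders the write before we proceed */
}
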
@@ -1709,6 +1709,16 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
static inline void i801_acpi_remove(struct i801_priv *priv) { }
#endif

static unsigned char i801_setup_hstcfg(struct i801_priv *priv)
{
	unsigned char hstcfg = priv->original_hstcfg;

	hstcfg &= ~SMBHSTCFG_I2C_EN;	/* SMBus timing */
	hstcfg |= SMBHSTCFG_HST_EN;
	pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg);
	return hstcfg;
}

static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned char temp;

@@ -1830,14 +1840,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
		return err;
	}

	pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp);
	priv->original_hstcfg = temp;
	temp &= ~SMBHSTCFG_I2C_EN;	/* SMBus timing */
	if (!(temp & SMBHSTCFG_HST_EN)) {
	pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg);
	temp = i801_setup_hstcfg(priv);
	if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN))
		dev_info(&dev->dev, "Enabling SMBus device\n");
		temp |= SMBHSTCFG_HST_EN;
	}
	pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp);

	if (temp & SMBHSTCFG_SMB_SMI_EN) {
		dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");

@@ -1952,10 +1958,9 @@ static void i801_shutdown(struct pci_dev *dev)
#ifdef CONFIG_PM_SLEEP
static int i801_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct i801_priv *priv = pci_get_drvdata(pci_dev);
	struct i801_priv *priv = dev_get_drvdata(dev);

	pci_write_config_byte(pci_dev, SMBHSTCFG, priv->original_hstcfg);
	pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
	return 0;
}

@@ -1963,6 +1968,7 @@ static int i801_resume(struct device *dev)
{
	struct i801_priv *priv = dev_get_drvdata(dev);

	i801_setup_hstcfg(priv);
	i801_enable_host_notify(&priv->adapter);

	return 0;

@@ -681,8 +681,8 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
	unsigned int cnt_mul;
	int ret = -EINVAL;

	if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
		target_speed = I2C_MAX_FAST_MODE_PLUS_FREQ;
	if (target_speed > I2C_MAX_HIGH_SPEED_MODE_FREQ)
		target_speed = I2C_MAX_HIGH_SPEED_MODE_FREQ;

	max_step_cnt = mtk_i2c_max_step_cnt(target_speed);
	base_step_cnt = max_step_cnt;

@@ -759,7 +759,7 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
	for (clk_div = 1; clk_div <= max_clk_div; clk_div++) {
		clk_src = parent_clk / clk_div;

		if (target_speed > I2C_MAX_FAST_MODE_FREQ) {
		if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
			/* Set master code speed register */
			ret = mtk_i2c_calculate_speed(i2c, clk_src,
						      I2C_MAX_FAST_MODE_FREQ,

@@ -25,6 +25,7 @@
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>

#define DRIVER_NAME "mxs-i2c"

@@ -200,7 +201,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
		dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1,
					       DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
					       DMA_PREP_INTERRUPT |
					       MXS_DMA_CTRL_WAIT4END);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");

@@ -228,7 +230,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
		dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
					       DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
					       DMA_PREP_INTERRUPT |
					       MXS_DMA_CTRL_WAIT4END);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");

@@ -260,7 +263,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
		dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
		desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
					       DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
					       DMA_PREP_INTERRUPT |
					       MXS_DMA_CTRL_WAIT4END);
		if (!desc) {
			dev_err(i2c->dev,
				"Failed to get DMA data write descriptor.\n");

@@ -1464,8 +1464,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)

	/* create pre-declared device nodes */
	of_i2c_register_devices(adap);
	i2c_acpi_register_devices(adap);
	i2c_acpi_install_space_handler(adap);
	i2c_acpi_register_devices(adap);

	if (adap->nr < __i2c_first_dynamic_bus_num)
		i2c_scan_static_board_info(adap);

@@ -17,10 +17,12 @@
#include "trackpoint.h"

static const char * const trackpoint_variants[] = {
	[TP_VARIANT_IBM]	= "IBM",
	[TP_VARIANT_ALPS]	= "ALPS",
	[TP_VARIANT_ELAN]	= "Elan",
	[TP_VARIANT_NXP]	= "NXP",
	[TP_VARIANT_IBM]		= "IBM",
	[TP_VARIANT_ALPS]		= "ALPS",
	[TP_VARIANT_ELAN]		= "Elan",
	[TP_VARIANT_NXP]		= "NXP",
	[TP_VARIANT_JYT_SYNAPTICS]	= "JYT_Synaptics",
	[TP_VARIANT_SYNAPTICS]		= "Synaptics",
};

/*

@@ -24,10 +24,12 @@
 * 0x01 was the original IBM trackpoint, others implement very limited
 * subset of trackpoint features.
 */
#define TP_VARIANT_IBM		0x01
#define TP_VARIANT_ALPS		0x02
#define TP_VARIANT_ELAN		0x03
#define TP_VARIANT_NXP		0x04
#define TP_VARIANT_IBM			0x01
#define TP_VARIANT_ALPS			0x02
#define TP_VARIANT_ELAN			0x03
#define TP_VARIANT_NXP			0x04
#define TP_VARIANT_JYT_SYNAPTICS	0x05
#define TP_VARIANT_SYNAPTICS		0x06

/*
 * Commands

@@ -548,6 +548,14 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
		},
	},
	{
		/* Entroware Proteus */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
		},
	},
	{ }
};

@@ -676,6 +684,14 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
		},
	},
	{
		/* Entroware Proteus */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
		},
	},
	{ }
};

@@ -3840,14 +3840,18 @@ int amd_iommu_activate_guest_mode(void *data)
{
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	u64 valid;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
	    !entry || entry->lo.fields_vapic.guest_mode)
		return 0;

	valid = entry->lo.fields_vapic.valid;

	entry->lo.val = 0;
	entry->hi.val = 0;

	entry->lo.fields_vapic.valid = valid;
	entry->lo.fields_vapic.guest_mode = 1;
	entry->lo.fields_vapic.ga_log_intr = 1;
	entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;

@@ -3864,12 +3868,14 @@ int amd_iommu_deactivate_guest_mode(void *data)
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	struct irq_cfg *cfg = ir_data->cfg;
	u64 valid = entry->lo.fields_remap.valid;
	u64 valid;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
	    !entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	valid = entry->lo.fields_remap.valid;

	entry->lo.val = 0;
	entry->hi.val = 0;

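Both IRTE hunks cure the same ordering bug: the old code read through entry in a declaration initializer, before the !entry guard could reject a NULL pointer. Schematically (a fragment for illustration, not the iommu code):

/* before: the initializer dereferences 'entry' ahead of the guard */
u64 valid = entry->lo.fields_remap.valid;
if (!entry)
	return 0;

/* after: guard first, then read */
u64 valid;
if (!entry)
	return 0;
valid = entry->lo.fields_remap.valid;
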
@@ -860,10 +860,14 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	int blocksize = *(int *) data;
	int blocksize = *(int *) data, id;
	bool rc;

	return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
				       start, len);
	id = dax_read_lock();
	rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
	dax_read_unlock(id);

	return rc;
}

/* Check devices support synchronous DAX */

@@ -1136,15 +1136,16 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	bool ret = false;
	int srcu_idx;
	bool ret;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return false;
		goto out;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;

@@ -1907,16 +1907,15 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
}

/**
 * spi_nor_sr1_bit6_quad_enable() - Set/Unset the Quad Enable BIT(6) in the
 *                                  Status Register 1.
 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
 *                                  Register 1.
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	true to enable Quad mode, false to disable Quad mode.
 *
 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable)
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
	int ret;

@@ -1924,56 +1923,45 @@ int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable)
	if (ret)
		return ret;

	if ((enable && (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)) ||
	    (!enable && !(nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)))
	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
		return 0;

	if (enable)
		nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
	else
		nor->bouncebuf[0] &= ~SR1_QUAD_EN_BIT6;
	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;

	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit1_quad_enable() - set/unset the Quad Enable BIT(1) in the
 *                                  Status Register 2.
 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
 *                                  Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @enable:	true to enable Quad mode, false to disable Quad mode.
 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable)
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor,
							enable ? SR2_QUAD_EN_BIT1 : 0);
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if ((enable && (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)) ||
	    (!enable && !(nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)))
	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	if (enable)
		nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
	else
		nor->bouncebuf[0] &= ~SR2_QUAD_EN_BIT1;
	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit7_quad_enable() - set/unset QE bit in Status Register 2.
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	true to enable Quad mode, false to disable Quad mode.
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *

@@ -1983,7 +1971,7 @@ int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable)
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable)
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;

@@ -1993,15 +1981,11 @@ int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable)
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if ((enable && (*sr2 & SR2_QUAD_EN_BIT7)) ||
	    (!enable && !(*sr2 & SR2_QUAD_EN_BIT7)))
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	if (enable)
		*sr2 |= SR2_QUAD_EN_BIT7;
	else
		*sr2 &= ~SR2_QUAD_EN_BIT7;
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)

@@ -2914,13 +2898,12 @@ static int spi_nor_init_params(struct spi_nor *nor)
}

/**
 * spi_nor_quad_enable() - enable/disable Quad I/O if needed.
 * spi_nor_quad_enable() - enable Quad I/O if needed.
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	true to enable Quad mode. false to disable Quad mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_quad_enable(struct spi_nor *nor, bool enable)
static int spi_nor_quad_enable(struct spi_nor *nor)
{
	if (!nor->params->quad_enable)
		return 0;

@@ -2929,7 +2912,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor, bool enable)
	    spi_nor_get_protocol_width(nor->write_proto) == 4))
		return 0;

	return nor->params->quad_enable(nor, enable);
	return nor->params->quad_enable(nor);
}

/**

@@ -2953,7 +2936,7 @@ static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_quad_enable(nor, true);
	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;

@@ -3000,8 +2983,6 @@ void spi_nor_restore(struct spi_nor *nor)
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET)
		nor->params->set_4byte_addr_mode(nor, false);

	spi_nor_quad_enable(nor, false);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);

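All three quad_enable variants above collapse to the same shape once the never-used disable path is dropped: read the status/configuration register, return early if the QE bit is already set, otherwise set it and write back with verification. The skeleton, with hypothetical accessor names standing in for the spi_nor register helpers:

#include <linux/types.h>

struct spi_nor;					/* opaque in this sketch */
int read_reg(struct spi_nor *nor, u8 *buf);	/* stand-in accessors */
int write_and_verify(struct spi_nor *nor, u8 val);

/* shared shape of the quad-enable helpers after the cleanup */
static int set_bit_in_reg(struct spi_nor *nor, u8 bit)
{
	u8 reg;
	int ret;

	ret = read_reg(nor, &reg);
	if (ret)
		return ret;

	if (reg & bit)
		return 0;	/* already set: skip the flash write */

	reg |= bit;
	return write_and_verify(nor, reg);
}

The early-out matters on NOR flash, where every register write costs an erase-cycle-like update; the read-first pattern makes re-initialisation idempotent.
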
@@ -198,7 +198,7 @@ struct spi_nor_locking_ops {
 *			higher index in the array, the higher priority.
 * @erase_map:		the erase map parsed from the SFDP Sector Map Parameter
 *			Table.
 * @quad_enable:	enables/disables SPI NOR Quad mode.
 * @quad_enable:	enables SPI NOR quad mode.
 * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
 * @convert_addr:	converts an absolute address into something the flash
 *			will understand. Particularly useful when pagesize is

@@ -219,7 +219,7 @@ struct spi_nor_flash_parameter {

	struct spi_nor_erase_map erase_map;

	int (*quad_enable)(struct spi_nor *nor, bool enable);
	int (*quad_enable)(struct spi_nor *nor);
	int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
	u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
	int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);

@@ -406,9 +406,9 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
int spi_nor_wait_till_ready(struct spi_nor *nor);
int spi_nor_lock_and_prep(struct spi_nor *nor);
void spi_nor_unlock_and_unprep(struct spi_nor *nor);
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable);
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable);
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable);
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);

int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,

@@ -1039,6 +1039,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&rapl_defaults_core),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&rapl_defaults_core),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&rapl_defaults_spr_server),
	X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD,		&rapl_defaults_core),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&rapl_defaults_byt),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&rapl_defaults_cht),

@@ -1692,9 +1692,9 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
	*nr_apqns = 0;

	/* fetch status of all crypto cards */
	device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
				      sizeof(struct zcrypt_device_status_ext),
				      GFP_KERNEL);
	device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
				       sizeof(struct zcrypt_device_status_ext),
				       GFP_KERNEL);
	if (!device_status)
		return -ENOMEM;
	zcrypt_device_status_mask_ext(device_status);

@@ -1762,7 +1762,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
		verify = 0;
	}

	kfree(device_status);
	kvfree(device_status);
	return rc;
}
EXPORT_SYMBOL(cca_findcard2);

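kvmalloc_array() falls back to vmalloc when the request is too large or memory too fragmented for the page allocator, which suits a status table of MAX_ZDEV_ENTRIES_EXT entries. The one rule the second hunk enforces is the pairing: kvmalloc'ed memory must be released with kvfree(), never kfree(). A sketch of the pattern, with an illustrative helper:

#include <linux/mm.h>	/* kvmalloc_array(), kvfree() */

/* sketch: a table too big to depend on contiguous pages */
static int build_table(size_t count, size_t entry_size)
{
	void *table;

	table = kvmalloc_array(count, entry_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* ... fill and use the table ... */

	kvfree(table);	/* correct for both kmalloc and vmalloc backing */
	return 0;
}
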
@@ -182,10 +182,11 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
		pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
			dev_name(sas_ha->dev),
			SAS_ADDR(dev->sas_addr), res);
		return res;
	}
	set_bit(SAS_DEV_FOUND, &dev->state);
	kref_get(&dev->kref);
	return res;
	return 0;
}

@@ -7,6 +7,7 @@
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"

@@ -389,8 +390,8 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
		if (pos + 1 == drom_size || pos + entry->len > drom_size
				|| !entry->len) {
			tb_sw_warn(sw, "drom buffer overrun, aborting\n");
			return -EIO;
			tb_sw_warn(sw, "DROM buffer overrun\n");
			return -EILSEQ;
		}

		switch (entry->type) {

@@ -526,7 +527,8 @@ int tb_drom_read(struct tb_switch *sw)
	u16 size;
	u32 crc;
	struct tb_drom_header *header;
	int res;
	int res, retries = 1;

	if (sw->drom)
		return 0;

@@ -612,7 +614,17 @@ int tb_drom_read(struct tb_switch *sw)
		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
			   header->device_rom_revision);

	return tb_drom_parse_entries(sw);
	res = tb_drom_parse_entries(sw);
	/* If the DROM parsing fails, wait a moment and retry once */
	if (res == -EILSEQ && retries--) {
		tb_sw_warn(sw, "parsing DROM failed, retrying\n");
		msleep(100);
		res = tb_drom_read_n(sw, 0, sw->drom, size);
		if (!res)
			goto parse;
	}

	return res;
err:
	kfree(sw->drom);
	sw->drom = NULL;

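The switch from -EIO to -EILSEQ is what makes the retry targeted: a garbled read is worth repeating once after a settling delay, while a hard I/O error is passed straight up. The same retry-once shape in isolation (struct device_ctx and do_read() are hypothetical):

#include <linux/delay.h>	/* msleep() */
#include <linux/errno.h>

struct device_ctx;			/* hypothetical context */
int do_read(struct device_ctx *ctx);	/* returns 0 or -errno */

static int read_with_retry(struct device_ctx *ctx)
{
	int retries = 1;
	int res;

	for (;;) {
		res = do_read(ctx);
		/* retry only on "garbled", and only while retries remain */
		if (res != -EILSEQ || !retries--)
			break;
		msleep(100);	/* give the device time to settle */
	}

	return res;
}
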
@@ -5566,6 +5566,17 @@ static const struct pci_device_id serial_pci_tbl[] = {
		PCI_ANY_ID, PCI_ANY_ID,
		0, 0, pbn_wch384_4 },

	/*
	 * Realtek RealManage
	 */
	{ PCI_VENDOR_ID_REALTEK, 0x816a,
		PCI_ANY_ID, PCI_ANY_ID,
		0, 0, pbn_b0_1_115200 },

	{ PCI_VENDOR_ID_REALTEK, 0x816b,
		PCI_ANY_ID, PCI_ANY_ID,
		0, 0, pbn_b0_1_115200 },

	/* Fintek PCI serial cards */
	{ PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 },
	{ PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },

@@ -1916,24 +1916,12 @@ static inline bool uart_console_enabled(struct uart_port *port)
	return uart_console(port) && (port->cons->flags & CON_ENABLED);
}

static void __uart_port_spin_lock_init(struct uart_port *port)
static void uart_port_spin_lock_init(struct uart_port *port)
{
	spin_lock_init(&port->lock);
	lockdep_set_class(&port->lock, &port_lock_key);
}

/*
 * Ensure that the serial console lock is initialised early.
 * If this port is a console, then the spinlock is already initialised.
 */
static inline void uart_port_spin_lock_init(struct uart_port *port)
{
	if (uart_console(port))
		return;

	__uart_port_spin_lock_init(port);
}

#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/**
 * uart_console_write - write a console message to a serial port

@@ -2086,7 +2074,15 @@ uart_set_options(struct uart_port *port, struct console *co,
	struct ktermios termios;
	static struct ktermios dummy;

	uart_port_spin_lock_init(port);
	/*
	 * Ensure that the serial-console lock is initialised early.
	 *
	 * Note that the console-enabled check is needed because of kgdboc,
	 * which can end up calling uart_set_options() for an already enabled
	 * console via tty_find_polling_driver() and uart_poll_init().
	 */
	if (!uart_console_enabled(port) && !port->console_reinit)
		uart_port_spin_lock_init(port);

	memset(&termios, 0, sizeof(struct ktermios));

@@ -2378,13 +2374,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
	/* Power up port for set_mctrl() */
	uart_change_pm(state, UART_PM_STATE_ON);

	/*
	 * If this driver supports console, and it hasn't been
	 * successfully registered yet, initialise spin lock for it.
	 */
	if (port->cons && !(port->cons->flags & CON_ENABLED))
		__uart_port_spin_lock_init(port);

	/*
	 * Ensure that the modem control lines are de-activated.
	 * keep the DTR setting that is set in uart_set_options()

@@ -2801,10 +2790,12 @@ static ssize_t console_store(struct device *dev,
		if (oldconsole && !newconsole) {
			ret = unregister_console(uport->cons);
		} else if (!oldconsole && newconsole) {
			if (uart_console(uport))
			if (uart_console(uport)) {
				uport->console_reinit = 1;
				register_console(uport->cons);
			else
			} else {
				ret = -ENOENT;
			}
		}
	} else {
		ret = -ENXIO;

@@ -2900,7 +2891,12 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
		goto out;
	}

	uart_port_spin_lock_init(uport);
	/*
	 * If this port is in use as a console then the spinlock is already
	 * initialised.
	 */
	if (!uart_console_enabled(uport))
		uart_port_spin_lock_init(uport);

	if (uport->cons && uport->dev)
		of_console_check(uport->dev->of_node, uport->cons->name, uport->line);

@@ -827,6 +827,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo
	if (rv < 0)
		return rv;

	if (!usblp->present) {
		count = -ENODEV;
		goto done;
	}

	if ((avail = usblp->rstatus) < 0) {
		printk(KERN_ERR "usblp%d: error %d reading from printer\n",
		       usblp->minor, (int)avail);

@@ -397,6 +397,10 @@ static const struct usb_device_id usb_quirk_list[] = {
	/* Generic RTL8153 based ethernet adapters */
	{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },

	/* SONiX USB DEVICE Touchpad */
	{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
			USB_QUIRK_IGNORE_REMOTE_WAKEUP },

	/* Action Semiconductor flash disk */
	{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
			USB_QUIRK_STRING_FETCH_255 },

@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>

@@ -14,7 +14,6 @@
 */

/*-------------------------------------------------------------------------*/
#include <linux/usb/otg.h>

#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)