mirror of https://gitee.com/openkylin/linux.git

commit 580bdf5650
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
@@ -1,12 +0,0 @@
-What:		/sys/devices/.../deferred_probe
-Date:		August 2016
-Contact:	Ben Hutchings <ben.hutchings@codethink.co.uk>
-Description:
-		The /sys/devices/.../deferred_probe attribute is
-		present for all devices. If a driver detects during
-		probing a device that a related device is not yet
-		ready, it may defer probing of the first device. The
-		kernel will retry probing the first device after any
-		other device is successfully probed. This attribute
-		reads as 1 if probing of this device is currently
-		deferred, or 0 otherwise.
@@ -62,6 +62,9 @@ wants to support one of the below features, it should adapt the bindings below.
 "irq" and "wakeup" names are recognized by I2C core, other names are
 left to individual drivers.
 
+- host-notify
+	device uses SMBus host notify protocol instead of interrupt line.
+
 - multi-master
 	states that there is another master active on this bus. The OS can use
 	this information to adapt power management to keep the arbitration awake

@@ -81,6 +84,11 @@ Binding may contain optional "interrupts" property, describing interrupts
 used by the device. I2C core will assign "irq" interrupt (or the very first
 interrupt if not using interrupt names) as primary interrupt for the slave.
 
+Alternatively, devices supporting SMbus Host Notify, and connected to
+adapters that support this feature, may use "host-notify" property. I2C
+core will create a virtual interrupt for Host Notify and assign it as
+primary interrupt for the slave.
+
 Also, if device is marked as a wakeup source, I2C core will set up "wakeup"
 interrupt for the device. If "wakeup" interrupt name is not present in the
 binding, then primary interrupt will be used as wakeup interrupt.
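As a minimal sketch of what the binding text above describes (not part of this
commit; the controller label, device compatible and address below are
illustrative assumptions), an I2C slave relying on the new "host-notify" flag
could look like:

	&i2c1 {
		touchpad@2c {
			compatible = "hid-over-i2c";	/* hypothetical slave */
			reg = <0x2c>;
			/* no "interrupts": the I2C core creates a virtual
			 * Host Notify interrupt and makes it the primary IRQ */
			host-notify;
			/* with no "wakeup" interrupt name, the primary
			 * (Host Notify) interrupt doubles as wakeup source */
			wakeup-source;
		};
	};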
@@ -5,7 +5,7 @@ Required properties:
 - compatible: "sigma,smp8758-nand"
 - reg: address/size of nfc_reg, nfc_mem, and pbus_reg
 - dmas: reference to the DMA channel used by the controller
-- dma-names: "nfc_sbox"
+- dma-names: "rxtx"
 - clocks: reference to the system clock
 - #address-cells: <1>
 - #size-cells: <0>

@@ -17,9 +17,9 @@ Example:
 
	nandc: nand-controller@2c000 {
		compatible = "sigma,smp8758-nand";
-		reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>;
+		reg = <0x2c000 0x30>, <0x2d000 0x800>, <0x20000 0x1000>;
		dmas = <&dma0 3>;
-		dma-names = "nfc_sbox";
+		dma-names = "rxtx";
		clocks = <&clkgen SYS_CLK>;
		#address-cells = <1>;
		#size-cells = <0>;
@@ -3,9 +3,11 @@
 Required properties:
	- reg - The ID number for the phy, usually a small integer
	- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
-		for applicable values
+		for applicable values. Required only if interface type is
+		PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
	- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
-		for applicable values
+		for applicable values. Required only if interface type is
+		PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
	- ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
		for applicable values
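A minimal sketch of a PHY node using these properties (not part of this commit;
the PHY address and the delay/FIFO macro values are illustrative assumptions,
taken from dt-bindings/net/ti-dp83867.h which the DTS must #include):

	ethernet-phy@0 {
		reg = <0>;
		/* required here because the interface type is RGMII_ID */
		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
		ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>;
		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
	};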
MAINTAINERS | 17

@@ -2193,14 +2193,6 @@ L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	sound/soc/atmel
 
-ATMEL DMA DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
-F:	drivers/dma/at_hdmac.c
-F:	drivers/dma/at_hdmac_regs.h
-F:	include/linux/platform_data/dma-atmel.h
-
 ATMEL XDMA DRIVER
 M:	Ludovic Desroches <ludovic.desroches@atmel.com>
 L:	linux-arm-kernel@lists.infradead.org

@@ -8184,6 +8176,15 @@ S:	Maintained
 F:	drivers/tty/serial/atmel_serial.c
 F:	include/linux/atmel_serial.h
 
+MICROCHIP / ATMEL DMA DRIVER
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	dmaengine@vger.kernel.org
+S:	Supported
+F:	drivers/dma/at_hdmac.c
+F:	drivers/dma/at_hdmac_regs.h
+F:	include/linux/platform_data/dma-atmel.h
+
 MICROCHIP / ATMEL ISC DRIVER
 M:	Songjun Wu <songjun.wu@microchip.com>
 L:	linux-media@vger.kernel.org
Makefile | 2

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Roaring Lionus
 
 # *DOCUMENTATION*
@@ -164,22 +164,25 @@ lr .req x30 // link register
 
 /*
  * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC.
+ * <symbol> is within the range +/- 4 GB of the PC when running
+ * in core kernel context. In module context, a movz/movk sequence
+ * is used, since modules may be loaded far away from the kernel
+ * when KASLR is in effect.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
-	 * @tmp: optional scratch register to be used if <dst> == sp, which
-	 *       is not allowed in an adrp instruction
	 */
-	.macro	adr_l, dst, sym, tmp=
-	.ifb	\tmp
+	.macro	adr_l, dst, sym
+#ifndef MODULE
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
-	.else
-	adrp	\tmp, \sym
-	add	\dst, \tmp, :lo12:\sym
-	.endif
+#else
+	movz	\dst, #:abs_g3:\sym
+	movk	\dst, #:abs_g2_nc:\sym
+	movk	\dst, #:abs_g1_nc:\sym
+	movk	\dst, #:abs_g0_nc:\sym
+#endif
	.endm
 
 /*

@@ -190,6 +193,7 @@ lr .req x30 // link register
	 * the address
	 */
	.macro	ldr_l, dst, sym, tmp=
+#ifndef MODULE
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]

@@ -197,6 +201,15 @@ lr .req x30 // link register
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
+#else
+	.ifb	\tmp
+	adr_l	\dst, \sym
+	ldr	\dst, [\dst]
+	.else
+	adr_l	\tmp, \sym
+	ldr	\dst, [\tmp]
+	.endif
+#endif
	.endm
 
 /*

@@ -206,8 +219,13 @@ lr .req x30 // link register
	 * while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
+#ifndef MODULE
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
+#else
+	adr_l	\tmp, \sym
+	str	\src, [\tmp]
+#endif
	.endm
 
 /*
@@ -239,7 +239,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
					  *cpte, &pgsize);
		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
-			changed = ptep_set_access_flags(vma, addr, cpte,
+			changed |= ptep_set_access_flags(vma, addr, cpte,
							pfn_pte(pfn,
								hugeprot),
							dirty);
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include "ctype.h"
+#include "string.h"
 
 int memcmp(const void *s1, const void *s2, size_t len)
 {
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
 #define memset(d,c,l) __builtin_memset(d,c,l)
 #define memcmp	__builtin_memcmp
 
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+					  unsigned int base);
+
 #endif /* BOOT_STRING_H */
@@ -254,23 +254,6 @@ ENTRY(__switch_to_asm)
	jmp	__switch_to
 END(__switch_to_asm)
 
-/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because its an
- * asmlinkage function so its argument has to be pushed on the stack. This
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-ENTRY(schedule_tail_wrapper)
-	FRAME_BEGIN
-
-	pushl	%eax
-	call	schedule_tail
-	popl	%eax
-
-	FRAME_END
-	ret
-ENDPROC(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
  *

@@ -279,15 +262,24 @@ ENDPROC(schedule_tail_wrapper)
  * edi: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	call	schedule_tail_wrapper
+	FRAME_BEGIN		/* help unwinder find end of stack */
+
+	/*
+	 * schedule_tail() is asmlinkage so we have to put its 'prev' argument
+	 * on the stack.
+	 */
+	pushl	%eax
+	call	schedule_tail
+	popl	%eax
 
	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */
 
 2:
	/* When we fork, we trace the syscall return in the child, too. */
-	movl	%esp, %eax
+	leal	FRAME_OFFSET(%esp), %eax
	call	syscall_return_slowpath
+	FRAME_END
	jmp	restore_all
 
	/* kernel thread */
@@ -36,6 +36,7 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
+#include <asm/frame.h>
 #include <linux/err.h>
 
 .code64

@@ -408,17 +409,19 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
+	FRAME_BEGIN			/* help unwinder find end of stack */
	movq	%rax, %rdi
-	call	schedule_tail			/* rdi: 'prev' task parameter */
+	call	schedule_tail		/* rdi: 'prev' task parameter */
 
-	testq	%rbx, %rbx			/* from kernel_thread? */
-	jnz	1f				/* kernel threads are uncommon */
+	testq	%rbx, %rbx		/* from kernel_thread? */
+	jnz	1f			/* kernel threads are uncommon */
 
 2:
-	movq	%rsp, %rdi
+	leaq	FRAME_OFFSET(%rsp),%rdi	/* pt_regs pointer */
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
	SWAPGS
+	FRAME_END
	jmp	restore_regs_and_iret
 
 1:
@@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
 
		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
+
+		/* There's no sense in having PEBS for non sampling events: */
+		if (!is_sampling_event(event))
+			return -EINVAL;
	}
	/*
	 * check that PEBS LBR correction does not conflict with
@@ -3987,7 +3987,7 @@ __init int intel_pmu_init(void)
		       x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
-	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+	x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
 
	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
@@ -434,6 +434,7 @@ static struct pmu cstate_core_pmu = {
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
+	.module		= THIS_MODULE,
 };
 
 static struct pmu cstate_pkg_pmu = {

@@ -447,6 +448,7 @@ static struct pmu cstate_pkg_pmu = {
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
+	.module		= THIS_MODULE,
 };
 
 static const struct cstate_model nhm_cstates __initconst = {
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
			continue;
 
		/* log dropped samples number */
-		if (error[bit])
+		if (error[bit]) {
			perf_log_lost_samples(event, error[bit]);
 
+			if (perf_event_account_interrupt(event))
+				x86_pmu_stop(event, 0);
+		}
+
		if (counts[bit]) {
			__intel_pmu_pebs_event(event, iregs, base,
					       top, bit, counts[bit]);
@@ -697,6 +697,7 @@ static int __init init_rapl_pmus(void)
	rapl_pmus->pmu.start		= rapl_pmu_event_start;
	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
	rapl_pmus->pmu.read		= rapl_pmu_event_read;
+	rapl_pmus->pmu.module		= THIS_MODULE;
	return 0;
 }
 
@@ -733,6 +733,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
+			.module		= THIS_MODULE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
@@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
 
 void hswep_uncore_cpu_init(void)
 {
-	int pkg = topology_phys_to_logical_pkg(0);
+	int pkg = boot_cpu_data.logical_proc_id;
 
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -57,7 +57,7 @@
 #define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
 #define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
 #define INTEL_FAM6_ATOM_MERRIFIELD	0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD	0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MOOREFIELD	0x5A /* Anniedale */
 #define INTEL_FAM6_ATOM_GOLDMONT	0x5C
 #define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
 
@@ -52,6 +52,21 @@ struct extended_sigtable {
 
 #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
 
+static inline u32 intel_get_microcode_revision(void)
+{
+	u32 rev, dummy;
+
+	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+
+	/* As documented in the SDM: Do a CPUID 1 here */
+	native_cpuid_eax(1);
+
+	/* get the current revision from MSR 0x8B */
+	native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+	return rev;
+}
+
 #ifdef CONFIG_MICROCODE_INTEL
 extern void __init load_ucode_intel_bsp(void);
 extern void load_ucode_intel_ap(void);
@@ -219,6 +219,24 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
	    : "memory");
 }
 
+#define native_cpuid_reg(reg)					\
+static inline unsigned int native_cpuid_##reg(unsigned int op)	\
+{								\
+	unsigned int eax = op, ebx, ecx = 0, edx;		\
+								\
+	native_cpuid(&eax, &ebx, &ecx, &edx);			\
+								\
+	return reg;						\
+}
+
+/*
+ * Native CPUID functions returning a single datum.
+ */
+native_cpuid_reg(eax)
+native_cpuid_reg(ebx)
+native_cpuid_reg(ecx)
+native_cpuid_reg(edx)
+
 static inline void load_cr3(pgd_t *pgdir)
 {
	write_cr3(__pa(pgdir));
@@ -58,7 +58,7 @@ get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
	if (task == current)
		return __builtin_frame_address(0);
 
-	return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
+	return &((struct inactive_task_frame *)task->thread.sp)->bp;
 }
 #else
 static inline unsigned long *
@@ -36,7 +36,10 @@ static inline void prepare_switch_to(struct task_struct *prev,
 
 asmlinkage void ret_from_fork(void);
 
-/* data that is pointed to by thread.sp */
+/*
+ * This is the structure pointed to by thread.sp for an inactive task. The
+ * order of the fields must match the code in __switch_to_asm().
+ */
 struct inactive_task_frame {
 #ifdef CONFIG_X86_64
	unsigned long r15;

@@ -48,6 +51,11 @@ struct inactive_task_frame {
	unsigned long di;
 #endif
	unsigned long bx;
+
+	/*
+	 * These two fields must be together. They form a stack frame header,
+	 * needed by get_frame_pointer().
+	 */
	unsigned long bp;
	unsigned long ret_addr;
 };
@@ -309,15 +309,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
-		u32 eax, ebx, ecx, edx;
-
-		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
-		node_id = ecx & 7;
-
-		/* get compute unit information */
-		smp_num_siblings = ((ebx >> 8) & 3) + 1;
-		c->x86_max_cores /= smp_num_siblings;
-		c->cpu_core_id = ebx & 0xff;
+		node_id = cpuid_ecx(0x8000001e) & 7;
 
		/*
		 * We may have multiple LLCs if L3 caches exist, so check if we
@@ -1221,7 +1221,7 @@ static __init int setup_disablecpuid(char *arg)
 {
	int bit;
 
-	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+	if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
@@ -14,6 +14,7 @@
 #include <asm/bugs.h>
 #include <asm/cpu.h>
 #include <asm/intel-family.h>
+#include <asm/microcode_intel.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>

@@ -78,14 +79,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
-	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
-		unsigned lower_word;
-
-		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
-		/* Required by the SDM */
-		sync_core();
-		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
-	}
+	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+		c->microcode = intel_get_microcode_revision();
 
	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
@@ -150,7 +150,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
 {
	struct ucode_patch *p;
 
-	p = kzalloc(size, GFP_KERNEL);
+	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);
 

@@ -368,26 +368,6 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
	return patch;
 }
 
-static void cpuid_1(void)
-{
-	/*
-	 * According to the Intel SDM, Volume 3, 9.11.7:
-	 *
-	 *   CPUID returns a value in a model specific register in
-	 *   addition to its usual register return values. The
-	 *   semantics of CPUID cause it to deposit an update ID value
-	 *   in the 64-bit model-specific register at address 08BH
-	 *   (IA32_BIOS_SIGN_ID). If no update is present in the
-	 *   processor, the value in the MSR remains unmodified.
-	 *
-	 * Use native_cpuid -- this code runs very early and we don't
-	 * want to mess with paravirt.
-	 */
-	unsigned int eax = 1, ebx, ecx = 0, edx;
-
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-}
-
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
	unsigned int val[2];

@@ -410,15 +390,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}
-	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
-	/* As documented in the SDM: Do a CPUID 1 here */
-	cpuid_1();
-
-	/* get the current revision from MSR 0x8B */
-	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
 
-	csig.rev = val[1];
+	csig.rev = intel_get_microcode_revision();
 
	uci->cpu_sig = csig;
	uci->valid = 1;

@@ -602,7 +575,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
 static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
	struct microcode_intel *mc;
-	unsigned int val[2];
+	u32 rev;
 
	mc = uci->mc;
	if (!mc)

@@ -610,21 +583,16 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 
	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
-	/* As documented in the SDM: Do a CPUID 1 here */
-	cpuid_1();
-
-	/* get the current revision from MSR 0x8B */
-	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-	if (val[1] != mc->hdr.rev)
+	rev = intel_get_microcode_revision();
+	if (rev != mc->hdr.rev)
		return -1;
 
 #ifdef CONFIG_X86_64
	/* Flush global tlb. This is precaution. */
	flush_tlb_early();
 #endif
-	uci->cpu_sig.rev = val[1];
+	uci->cpu_sig.rev = rev;
 
	if (early)
		print_ucode(uci);

@@ -804,8 +772,8 @@ static int apply_microcode_intel(int cpu)
	struct microcode_intel *mc;
	struct ucode_cpu_info *uci;
	struct cpuinfo_x86 *c;
-	unsigned int val[2];
	static int prev_rev;
+	u32 rev;
 
	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))

@@ -822,33 +790,28 @@ static int apply_microcode_intel(int cpu)
 
	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-	wrmsrl(MSR_IA32_UCODE_REV, 0);
-
-	/* As documented in the SDM: Do a CPUID 1 here */
-	cpuid_1();
 
-	/* get the current revision from MSR 0x8B */
-	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+	rev = intel_get_microcode_revision();
 
-	if (val[1] != mc->hdr.rev) {
+	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return -1;
	}
 
-	if (val[1] != prev_rev) {
+	if (rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
-			val[1],
+			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
-		prev_rev = val[1];
+		prev_rev = rev;
	}
 
	c = &cpu_data(cpu);
 
-	uci->cpu_sig.rev = val[1];
-	c->microcode = val[1];
+	uci->cpu_sig.rev = rev;
+	c->microcode = rev;
 
	return 0;
 }

@@ -860,7 +823,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
-	unsigned int curr_mc_size = 0;
+	unsigned int curr_mc_size = 0, new_mc_size = 0;
	unsigned int csig, cpf;
 
	while (leftover) {

@@ -901,6 +864,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc  = mc;
+			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
		}
 

@@ -926,7 +890,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
-	save_mc_for_early(new_mc, curr_mc_size);
+	save_mc_for_early(new_mc, new_mc_size);
 
	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);
@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
		crystal_khz = 24000;	/* 24.0 MHz */
		break;
+	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_ATOM_DENVERTON:
		crystal_khz = 25000;	/* 25.0 MHz */
		break;
	case INTEL_FAM6_ATOM_GOLDMONT:
@@ -6,6 +6,21 @@
 
 #define FRAME_HEADER_SIZE (sizeof(long) * 2)
 
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x)			\
+({							\
+	unsigned long val;				\
+	if (task == current)				\
+		val = READ_ONCE(x);			\
+	else						\
+		val = READ_ONCE_NOCHECK(x);		\
+	val;						\
+})
+
 static void unwind_dump(struct unwind_state *state, unsigned long *sp)
 {
	static bool dumped_before = false;

@@ -48,7 +63,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
	if (state->regs && user_mode(state->regs))
		return 0;
 
-	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+	addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
				     addr_p);
 
	return __kernel_text_address(addr) ? addr : 0;

@@ -162,7 +178,7 @@ bool unwind_next_frame(struct unwind_state *state)
	if (state->regs)
		next_bp = (unsigned long *)state->regs->bp;
	else
-		next_bp = (unsigned long *)*state->bp;
+		next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
 
	/* is the next frame pointer an encoded pointer to pt_regs? */
	regs = decode_frame_pointer(next_bp);

@@ -207,6 +223,16 @@ bool unwind_next_frame(struct unwind_state *state)
	return true;
 
 bad_address:
+	/*
+	 * When unwinding a non-current task, the task might actually be
+	 * running on another CPU, in which case it could be modifying its
+	 * stack while we're reading it. This is generally not a problem and
+	 * can be ignored as long as the caller understands that unwinding
+	 * another task will not always succeed.
+	 */
+	if (state->task != current)
+		goto the_end;
+
	if (state->regs) {
		printk_deferred_once(KERN_WARNING
			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+			       struct segmented_address addr,
+			       void *data,
+			       unsigned int size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.

@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    &ctxt->exception);
 }
 
-/* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,

@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
	rpl = selector & 3;
 
-	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
-	if ((seg == VCPU_SREG_CS
-	     || (seg == VCPU_SREG_SS
-		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
-	     || seg == VCPU_SREG_TR)
-	    && null_selector)
-		goto exception;
-
	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;
 
-	if (null_selector) /* for NULL selector skip all following checks */
+	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
+	if (null_selector) {
+		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+			goto exception;
+
+		if (seg == VCPU_SREG_SS) {
+			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+				goto exception;
+
+			/*
+			 * ctxt->ops->set_segment expects the CPL to be in
+			 * SS.DPL, so fake an expand-up 32-bit data segment.
+			 */
+			seg_desc.type = 3;
+			seg_desc.p = 1;
+			seg_desc.s = 1;
+			seg_desc.dpl = cpl;
+			seg_desc.d = 1;
+			seg_desc.g = 1;
+		}
+
+		/* Skip all following checks */
		goto load;
+	}
 
	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)

@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
 {
	u8 cpl = ctxt->ops->cpl(ctxt);
+
+	/*
+	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+	 * they can load it at CPL<3 (Intel's manual says only LSS can,
+	 * but it's wrong).
+	 *
+	 * However, the Intel manual says that putting IST=1/DPL=3 in
+	 * an interrupt gate will result in SS=3 (the AMD manual instead
+	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
+	 * and only forbid it here.
+	 */
+	if (seg == VCPU_SREG_SS && selector == 3 &&
+	    ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
 }

@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
-	return segmented_write(ctxt, ctxt->dst.addr.mem,
-			       &desc_ptr, 2 + ctxt->op_bytes);
+	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+				   &desc_ptr, 2 + ctxt->op_bytes);
 }
 
 static int em_sgdt(struct x86_emulate_ctxt *ctxt)

@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
	else
		size = offsetof(struct fxregs_state, xmm_space[0]);
 
-	return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
 }
 
 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,

@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
	if (rc != X86EMUL_CONTINUE)
		return rc;
 
-	rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
	if (rc != X86EMUL_CONTINUE)
		return rc;
 
@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
 }
+
+void kvm_lapic_exit(void)
+{
+	static_key_deferred_flush(&apic_hw_disabled);
+	static_key_deferred_flush(&apic_sw_disabled);
+}
@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
 void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
 
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
@@ -3342,6 +3342,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
	switch (cap->cap) {
	case KVM_CAP_HYPERV_SYNIC:
+		if (!irqchip_in_kernel(vcpu->kvm))
+			return -EINVAL;
		return kvm_hv_activate_synic(vcpu);
	default:
		return -EINVAL;

@@ -6045,6 +6047,7 @@ int kvm_arch_init(void *opaque)
 
 void kvm_arch_exit(void)
 {
+	kvm_lapic_exit();
	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
-	if (info->si_addr == (void *)-1) {
+	if (info->si_addr == (void __user *)-1) {
		err = -EINVAL;
		goto err_out;
	}
@@ -210,6 +210,70 @@ int __init efi_memblock_x86_reserve_range(void)
	return 0;
 }
 
+#define OVERFLOW_ADDR_SHIFT	(64 - EFI_PAGE_SHIFT)
+#define OVERFLOW_ADDR_MASK	(U64_MAX << OVERFLOW_ADDR_SHIFT)
+#define U64_HIGH_BIT		(~(U64_MAX >> 1))
+
+static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
+{
+	u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
+	u64 end_hi = 0;
+	char buf[64];
+
+	if (md->num_pages == 0) {
+		end = 0;
+	} else if (md->num_pages > EFI_PAGES_MAX ||
+		   EFI_PAGES_MAX - md->num_pages <
+		   (md->phys_addr >> EFI_PAGE_SHIFT)) {
+		end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
+			>> OVERFLOW_ADDR_SHIFT;
+
+		if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
+			end_hi += 1;
+	} else {
+		return true;
+	}
+
+	pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
+
+	if (end_hi) {
+		pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
+			i, efi_md_typeattr_format(buf, sizeof(buf), md),
+			md->phys_addr, end_hi, end);
+	} else {
+		pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
+			i, efi_md_typeattr_format(buf, sizeof(buf), md),
+			md->phys_addr, end);
+	}
+	return false;
+}
+
+static void __init efi_clean_memmap(void)
+{
+	efi_memory_desc_t *out = efi.memmap.map;
+	const efi_memory_desc_t *in = out;
+	const efi_memory_desc_t *end = efi.memmap.map_end;
+	int i, n_removal;
+
+	for (i = n_removal = 0; in < end; i++) {
+		if (efi_memmap_entry_valid(in, i)) {
+			if (out != in)
+				memcpy(out, in, efi.memmap.desc_size);
+			out = (void *)out + efi.memmap.desc_size;
+		} else {
+			n_removal++;
+		}
+		in = (void *)in + efi.memmap.desc_size;
+	}
+
+	if (n_removal > 0) {
+		u64 size = efi.memmap.nr_map - n_removal;
+
+		pr_warn("Removing %d invalid memory map entries.\n", n_removal);
+		efi_memmap_install(efi.memmap.phys_map, size);
+	}
+}
+
 void __init efi_print_memmap(void)
 {
	efi_memory_desc_t *md;

@@ -472,6 +536,8 @@ void __init efi_init(void)
		}
	}
 
+	efi_clean_memmap();
+
	if (efi_enabled(EFI_DBG))
		efi_print_memmap();
 }
@@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
 
	new_size = efi.memmap.desc_size * num_entries;
 
-	new_phys = memblock_alloc(new_size, 0);
+	new_phys = efi_memmap_alloc(num_entries);
	if (!new_phys) {
		pr_err("Could not allocate boot services memmap\n");
		return;

@@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
	}
 
	new_size = efi.memmap.desc_size * num_entries;
-	new_phys = memblock_alloc(new_size, 0);
+	new_phys = efi_memmap_alloc(num_entries);
	if (!new_phys) {
		pr_err("Failed to allocate new EFI memmap\n");
		return;
@@ -15,7 +15,7 @@ obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
 obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
 obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
 # SPI Devices
-obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
 # I2C Devices
 obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
 obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
@@ -11,6 +11,7 @@
 * of the License.
 */
 
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/sfi.h>
 #include <linux/spi/pxa2xx_spi.h>

@@ -34,6 +35,9 @@ static void __init *spidev_platform_data(void *info)
 {
	struct spi_board_info *spi_info = info;
 
+	if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+		return ERR_PTR(-ENODEV);
+
	spi_info->mode = SPI_MODE_0;
	spi_info->controller_data = &spidev_spi_chip;
@@ -301,13 +301,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
 
-	if (discard) {
-		ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
-				BLKDEV_DISCARD_ZERO, biop);
-		if (ret == 0 || (ret && ret != -EOPNOTSUPP))
-			goto out;
-	}
-
	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop);
	if (ret == 0 || (ret && ret != -EOPNOTSUPP))

@@ -370,6 +363,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
	struct bio *bio = NULL;
	struct blk_plug plug;
 
+	if (discard) {
+		if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
+				BLKDEV_DISCARD_ZERO))
+			return 0;
+	}
+
	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
			&bio, discard);
@@ -16,7 +16,7 @@
 static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
 {
-	sector_t zone_mask = blk_queue_zone_size(q) - 1;
+	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
 
	return sector & ~zone_mask;
 }

@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
		return -EINVAL;
 
	/* Check alignment (handle eventual smaller last zone) */
-	zone_sectors = blk_queue_zone_size(q);
+	zone_sectors = blk_queue_zone_sectors(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;
 
@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
			      struct block_device *bdev,
			      sector_t from, sector_t size)
 {
-	unsigned int zone_size = bdev_zone_size(bdev);
+	unsigned int zone_sectors = bdev_zone_sectors(bdev);
 
	/*
	 * If this function is called, then the disk is a zoned block device

@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
	 * regular block devices (no zone operation) and their zone size will
	 * be reported as 0. Allow this case.
	 */
-	if (!zone_size)
+	if (!zone_sectors)
		return true;
 
	/*

@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
	 * use it. Check the zone size too: it should be a power of 2 number
	 * of sectors.
	 */
-	if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+	if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
		u32 rem;
 
-		div_u64_rem(from, zone_size, &rem);
+		div_u64_rem(from, zone_sectors, &rem);
		if (rem)
			return false;
		if ((from + size) < get_capacity(disk)) {
-			div_u64_rem(size, zone_size, &rem);
+			div_u64_rem(size, zone_sectors, &rem);
			if (rem)
				return false;
		}
 
	} else {
 
-		if (from & (zone_size - 1))
+		if (from & (zone_sectors - 1))
			return false;
		if ((from + size) < get_capacity(disk) &&
-		    (size & (zone_size - 1)))
+		    (size & (zone_sectors - 1)))
			return false;
 
	}
@@ -132,9 +132,9 @@ config HT16K33
	tristate "Holtek Ht16K33 LED controller with keyscan"
	depends on FB && OF && I2C && INPUT
	select FB_SYS_FOPS
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
	select INPUT_MATRIXKMAP
	select FB_BACKLIGHT
	help
@@ -141,8 +141,6 @@ extern void device_unblock_probing(void);
 extern struct kset *devices_kset;
 extern void devices_kset_move_last(struct device *dev);
 
-extern struct device_attribute dev_attr_deferred_probe;
-
 #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
 extern void module_add_driver(struct module *mod, struct device_driver *drv);
 extern void module_remove_driver(struct device_driver *drv);
@@ -1060,14 +1060,8 @@ static int device_add_attrs(struct device *dev)
			goto err_remove_dev_groups;
	}
 
-	error = device_create_file(dev, &dev_attr_deferred_probe);
-	if (error)
-		goto err_remove_online;
-
	return 0;
 
- err_remove_online:
-	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:

@@ -1085,7 +1079,6 @@ static void device_remove_attrs(struct device *dev)
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
 
-	device_remove_file(dev, &dev_attr_deferred_probe);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);
 
@@ -53,19 +53,6 @@ static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
-static ssize_t deferred_probe_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
-{
-	bool value;
-
-	mutex_lock(&deferred_probe_mutex);
-	value = !list_empty(&dev->p->deferred_probe);
-	mutex_unlock(&deferred_probe_mutex);
-
-	return sprintf(buf, "%d\n", value);
-}
-DEVICE_ATTR_RO(deferred_probe);
-
 /*
  * In some cases, like suspend to RAM or hibernation, It might be reasonable
  * to prohibit probing of devices as it could be unsafe.
@@ -1042,6 +1042,7 @@ static int __init nbd_init(void)
		return -ENOMEM;
 
	for (i = 0; i < nbds_max; i++) {
+		struct request_queue *q;
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;

@@ -1067,12 +1068,13 @@ static int __init nbd_init(void)
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
-		disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
-		if (!disk->queue) {
+		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+		if (IS_ERR(q)) {
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
			goto out;
		}
+		disk->queue = q;
 
		/*
		 * Tell the block layer that we are not a rotational device
@@ -56,6 +56,7 @@ struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
+	u8 sense[SCSI_SENSE_BUFFERSIZE];
	struct scatterlist sg[];
 };
 

@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
	}
 
	if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
-		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+		memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+		sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
		sgs[num_out + num_in++] = &sense;
		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
		sgs[num_out + num_in++] = &inhdr;

@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
	if (err)
		goto out_put_disk;
 
-	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
+	vblk->disk->queue = q;
 
	q->queuedata = vblk;
 
@@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;
 
-	if (!pfn_valid(PFN_DOWN(p)))
-		return -EIO;
-
	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;

@@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
+			if (!virt_addr_valid(kbuf))
+				return -ENXIO;
 
			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;

@@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
+		if (!virt_addr_valid(ptr))
+			return -ENXIO;
 
		copied = copy_from_user(ptr, buf, sz);
		if (copied) {

@@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;
 
-	if (!pfn_valid(PFN_DOWN(p)))
-		return -EIO;
-
	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
@@ -290,6 +290,7 @@ static int register_device(int minor, struct pp_struct *pp)
	struct pardevice *pdev = NULL;
	char *name;
	struct pardev_cb ppdev_cb;
+	int rc = 0;
 
	name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
	if (name == NULL)

@@ -298,8 +299,8 @@ static int register_device(int minor, struct pp_struct *pp)
	port = parport_find_number(minor);
	if (!port) {
		pr_warn("%s: no associated port!\n", name);
-		kfree(name);
-		return -ENXIO;
+		rc = -ENXIO;
+		goto err;
	}
 
	memset(&ppdev_cb, 0, sizeof(ppdev_cb));

@@ -308,16 +309,18 @@ static int register_device(int minor, struct pp_struct *pp)
	ppdev_cb.private = pp;
	pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
	parport_put_port(port);
-	kfree(name);
 
	if (!pdev) {
		pr_warn("%s: failed to register device!\n", name);
-		return -ENXIO;
+		rc = -ENXIO;
+		goto err;
	}
 
	pp->pdev = pdev;
	dev_dbg(&pdev->dev, "registered pardevice\n");
	return 0;
+err:
+	kfree(name);
+	return rc;
 }
 
 static enum ieee1284_phase init_phase(int mode)
@@ -24,5 +24,5 @@ config DW_DMAC_PCI
	select DW_DMAC_CORE
	help
	  Support the Synopsys DesignWare AHB DMA controller on the
-	  platfroms that enumerate it as a PCI device. For example,
+	  platforms that enumerate it as a PCI device. For example,
	  Intel Medfield has integrated this GPDMA controller.
@@ -64,6 +64,8 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX8	0x6f2e
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX9	0x6f2f
 
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX	0x2021
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },

@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
	}
 }
 
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+	return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+}
+
 static bool is_xeon_cb32(struct pci_dev *pdev)
 {
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
-		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+		is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
 }
 
 bool is_bwd_ioat(struct pci_dev *pdev)

@@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-				GFP_KERNEL, &ioat_chan->completion_dma);
+				GFP_NOWAIT, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;
 

@@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
	order = IOAT_MAX_ORDER;
-	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
	if (!ring)
		return -ENOMEM;
 

@@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_0) {
+		if (is_skx_ioat(pdev))
+			device->version = IOAT_VER_3_2;
		err = ioat3_dma_probe(device, ioat_dca_enabled);
 
		if (device->version >= IOAT_VER_3_3)
@@ -938,6 +938,23 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
		d->ccr |= CCR_DST_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_SRC_AMODE_DBLIDX;
+
+			if (port_window_bytes >= 64)
+				d->csdp |= CSDP_SRC_BURST_64;
+			else if (port_window_bytes >= 32)
+				d->csdp |= CSDP_SRC_BURST_32;
+			else if (port_window_bytes >= 16)
+				d->csdp |= CSDP_SRC_BURST_16;
+
		} else {
			d->ccr |= CCR_SRC_AMODE_CONSTANT;
		}
	} else {
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
 
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_DST_AMODE_DBLIDX;
			d->ei = 1;
			/*
			 * One frame covers the port_window and by configure

@@ -948,27 +965,11 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
			d->fi = -(port_window_bytes - 1);
 
			if (port_window_bytes >= 64)
-				d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+				d->csdp |= CSDP_DST_BURST_64;
			else if (port_window_bytes >= 32)
-				d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+				d->csdp |= CSDP_DST_BURST_32;
			else if (port_window_bytes >= 16)
-				d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
-		} else {
-			d->ccr |= CCR_SRC_AMODE_CONSTANT;
-		}
-	} else {
-		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
-
-		d->ccr |= CCR_SRC_AMODE_POSTINC;
-		if (port_window) {
-			d->ccr |= CCR_DST_AMODE_DBLIDX;
-
-			if (port_window_bytes >= 64)
-				d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
-			else if (port_window_bytes >= 32)
-				d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
-			else if (port_window_bytes >= 16)
-				d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+				d->csdp |= CSDP_DST_BURST_16;
		} else {
			d->ccr |= CCR_DST_AMODE_CONSTANT;
		}

@@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;
-		if (port_window && dir == DMA_MEM_TO_DEV) {
+		if (port_window && dir == DMA_DEV_TO_MEM) {
			osg->ei = 1;
			/*
			 * One frame covers the port_window and by configure

@@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;
+	u32 lch_count;
 
	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)

@@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);
 
-	if (!pdev->dev.of_node) {
-		od->dma_requests = od->plat->dma_attr->lch_count;
-		if (unlikely(!od->dma_requests))
-			od->dma_requests = OMAP_SDMA_REQUESTS;
-	} else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
-					&od->dma_requests)) {
+	/* Number of DMA requests */
+	od->dma_requests = OMAP_SDMA_REQUESTS;
+	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+						      "dma-requests",
+						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
-		od->dma_requests = OMAP_SDMA_REQUESTS;
	}
 
-	od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
-				   sizeof(*od->lch_map), GFP_KERNEL);
+	/* Number of available logical channels */
+	if (!pdev->dev.of_node) {
+		lch_count = od->plat->dma_attr->lch_count;
+		if (unlikely(!lch_count))
+			lch_count = OMAP_SDMA_CHANNELS;
+	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+					&lch_count)) {
+		dev_info(&pdev->dev,
+			 "Missing dma-channels property, using %u.\n",
+			 OMAP_SDMA_CHANNELS);
+		lch_count = OMAP_SDMA_CHANNELS;
+	}
+
+	od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+				   GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;
 
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
 
	/* for cyclic capability */
	bool cyclic;
+
+	/* for runtime pm tracking */
+	bool active;
 };
 
 struct pl330_dmac {

@@ -2033,6 +2036,7 @@ static void pl330_tasklet(unsigned long data)
			_stop(pch->thread);
			spin_unlock(&pch->thread->dmac->lock);
			power_down = true;
+			pch->active = false;
		} else {
			/* Make sure the PL330 Channel thread is active */
			spin_lock(&pch->thread->dmac->lock);

@@ -2052,6 +2056,7 @@ static void pl330_tasklet(unsigned long data)
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
			if (power_down) {
+				pch->active = true;
				spin_lock(&pch->thread->dmac->lock);
				_start(pch->thread);
				spin_unlock(&pch->thread->dmac->lock);

@@ -2166,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
	unsigned long flags;
	struct pl330_dmac *pl330 = pch->dmac;
	LIST_HEAD(list);
+	bool power_down = false;
 
	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

@@ -2176,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
		pch->thread->req[0].desc = NULL;
		pch->thread->req[1].desc = NULL;
		pch->thread->req_running = -1;
+		power_down = pch->active;
+		pch->active = false;
 
		/* Mark all desc done */
		list_for_each_entry(desc, &pch->submitted_list, node) {

@@ -2193,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
+	if (power_down)
+		pm_runtime_put_autosuspend(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);
 
	return 0;

@@ -2357,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
		 * updated on work_list emptiness status.
		 */
		WARN_ON(list_empty(&pch->submitted_list));
+		pch->active = true;
		pm_runtime_get_sync(pch->dmac->ddma.dev);
	}
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
 {
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+	struct rcar_dmac_chan_map *map = &rchan->map;
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
		free_page((unsigned long)page);
	}
 
+	/* Remove slave mapping if present. */
+	if (map->slave.xfer_size) {
+		dma_unmap_resource(chan->device->dev, map->addr,
+				   map->slave.xfer_size, map->dir, 0);
+		map->slave.xfer_size = 0;
+	}
+
	pm_runtime_put(chan->device->dev);
 }
 
@@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if ((status == DMA_COMPLETE) || (!state))

@@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (cookie == chan->desc->vdesc.tx.cookie) {
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	} else if (vdesc) {
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	} else {
		residue = 0;
	}

	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

@@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 3)
	if (dma_spec->args_count < 4)
		return NULL;

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.threshold = 0;
	cfg.threshold = dma_spec->args[3];

	if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
				STM32_DMA_MAX_REQUEST_ID))
		return NULL;

	if (dma_spec->args_count > 3)
		cfg.threshold = dma_spec->args[3];

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);

@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
	match = of_match_node(ti_am335x_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

@@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
		dev_err(&edev->dev, "out of memory in extcon_set_state\n");
		kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);

		return 0;
		return -ENOMEM;
	}

	length = name_show(&edev->dev, NULL, prop_buf);

@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
	}

	/* allocate memory for new EFI memmap */
	new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
					PAGE_SIZE);
	new_memmap_phy = efi_memmap_alloc(new_nr_map);
	if (!new_memmap_phy)
		return;

@@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle);

unsigned long get_dram_base(efi_system_table_t *sys_table_arg);

efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
			unsigned long orig_fdt_size,
			void *fdt, int new_fdt_size, char *cmdline_ptr,
			u64 initrd_addr, u64 initrd_size,
			efi_memory_desc_t *memory_map,
			unsigned long map_size, unsigned long desc_size,
			u32 desc_ver);

efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
					    void *handle,
					    unsigned long *new_fdt_addr,

@@ -16,13 +16,10 @@

#include "efistub.h"

efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
			unsigned long orig_fdt_size,
			void *fdt, int new_fdt_size, char *cmdline_ptr,
			u64 initrd_addr, u64 initrd_size,
			efi_memory_desc_t *memory_map,
			unsigned long map_size, unsigned long desc_size,
			u32 desc_ver)
static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
			       unsigned long orig_fdt_size,
			       void *fdt, int new_fdt_size, char *cmdline_ptr,
			       u64 initrd_addr, u64 initrd_size)
{
	int node, num_rsv;
	int status;

@@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
	if (status)
		goto fdt_set_fail;

	fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map);
	fdt_val64 = U64_MAX; /* placeholder */
	status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
			     &fdt_val64, sizeof(fdt_val64));
	if (status)
		goto fdt_set_fail;

	fdt_val32 = cpu_to_fdt32(map_size);
	fdt_val32 = U32_MAX; /* placeholder */
	status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
			     &fdt_val32, sizeof(fdt_val32));
	if (status)
		goto fdt_set_fail;

	fdt_val32 = cpu_to_fdt32(desc_size);
	status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
			     &fdt_val32, sizeof(fdt_val32));
	if (status)
		goto fdt_set_fail;

	fdt_val32 = cpu_to_fdt32(desc_ver);
	status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
			     &fdt_val32, sizeof(fdt_val32));
	if (status)

@@ -148,6 +143,43 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
	return EFI_LOAD_ERROR;
}

static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
{
	int node = fdt_path_offset(fdt, "/chosen");
	u64 fdt_val64;
	u32 fdt_val32;
	int err;

	if (node < 0)
		return EFI_LOAD_ERROR;

	fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
				  &fdt_val64, sizeof(fdt_val64));
	if (err)
		return EFI_LOAD_ERROR;

	fdt_val32 = cpu_to_fdt32(*map->map_size);
	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
				  &fdt_val32, sizeof(fdt_val32));
	if (err)
		return EFI_LOAD_ERROR;

	fdt_val32 = cpu_to_fdt32(*map->desc_size);
	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
				  &fdt_val32, sizeof(fdt_val32));
	if (err)
		return EFI_LOAD_ERROR;

	fdt_val32 = cpu_to_fdt32(*map->desc_ver);
	err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
				  &fdt_val32, sizeof(fdt_val32));
	if (err)
		return EFI_LOAD_ERROR;

	return EFI_SUCCESS;
}

#ifndef EFI_FDT_ALIGN
#define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif

@@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
		goto fail;
	}

	/*
	 * Now that we have done our final memory allocation (and free)
	 * we can get the memory map key needed for
	 * exit_boot_services().
	 */
	status = efi_get_memory_map(sys_table, &map);
	if (status != EFI_SUCCESS)
		goto fail_free_new_fdt;

	status = update_fdt(sys_table,
			    (void *)fdt_addr, fdt_size,
			    (void *)*new_fdt_addr, new_fdt_size,
			    cmdline_ptr, initrd_addr, initrd_size,
			    memory_map, map_size, desc_size, desc_ver);
			    cmdline_ptr, initrd_addr, initrd_size);

	/* Succeeding the first time is the expected case. */
	if (status == EFI_SUCCESS)

@@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
		/*
		 * We need to allocate more space for the new
		 * device tree, so free existing buffer that is
		 * too small. Also free memory map, as we will need
		 * to get new one that reflects the free/alloc we do
		 * on the device tree buffer.
		 * too small.
		 */
		efi_free(sys_table, new_fdt_size, *new_fdt_addr);
		sys_table->boottime->free_pool(memory_map);
		new_fdt_size += EFI_PAGE_SIZE;
	} else {
		pr_efi_err(sys_table, "Unable to construct new device tree.\n");
		goto fail_free_mmap;
		goto fail_free_new_fdt;
	}
}

	sys_table->boottime->free_pool(memory_map);
	priv.runtime_map = runtime_map;
	priv.runtime_entry_count = &runtime_entry_count;
	status = efi_exit_boot_services(sys_table, handle, &map, &priv,

@@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
	if (status == EFI_SUCCESS) {
		efi_set_virtual_address_map_t *svam;

		status = update_fdt_memmap((void *)*new_fdt_addr, &map);
		if (status != EFI_SUCCESS) {
			/*
			 * The kernel won't get far without the memory map, but
			 * may still be able to print something meaningful so
			 * return success here.
			 */
			return EFI_SUCCESS;
		}

		/* Install the new virtual address map */
		svam = sys_table->runtime->set_virtual_address_map;
		status = svam(runtime_entry_count * desc_size, desc_size,

@@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,

	pr_efi_err(sys_table, "Exit boot services failed.\n");

fail_free_mmap:
	sys_table->boottime->free_pool(memory_map);

fail_free_new_fdt:
	efi_free(sys_table, new_fdt_size, *new_fdt_addr);

@@ -9,6 +9,44 @@
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_alloc(size, 0);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns the physical address of the allocated memory map on
 * success, zero on failure.
 */
phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
{
	unsigned long size = num_entries * efi.memmap.desc_size;

	if (slab_is_available())
		return __efi_memmap_alloc_late(size);

	return __efi_memmap_alloc_early(size);
}

/**
 * __efi_memmap_init - Common code for mapping the EFI memory map

@@ -2496,6 +2496,7 @@ static const struct hid_device_id hid_ignore_list[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)

@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
	if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
		return rdesc;

	if (*rsize < 4)
		return rdesc;

	for (i = 0; i < *rsize - 4; i++)
		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
			rdesc[i] = 0x19;

@@ -816,6 +816,9 @@
#define USB_VENDOR_ID_PETALYNX 0x18b1
#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037

#define USB_VENDOR_ID_PETZL 0x2122
#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234

#define USB_VENDOR_ID_PHILIPS 0x0471
#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617

@@ -426,6 +426,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
	if (ret)
		goto out_unlock;

	/*
	 * The HID over I2C specification states that if a DEVICE needs time
	 * after the PWR_ON request, it should utilise CLOCK stretching.
	 * However, it has been observered that the Windows driver provides a
	 * 1ms sleep between the PWR_ON and RESET requests and that some devices
	 * rely on this.
	 */
	usleep_range(1000, 5000);

	i2c_hid_dbg(ihid, "resetting...\n");

	ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);

@@ -585,10 +585,29 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
		 u8 command, int size, union i2c_smbus_data *data)
{
	struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
	unsigned short piix4_smba = adapdata->smba;
	int retries = MAX_TIMEOUT;
	int smbslvcnt;
	u8 smba_en_lo;
	u8 port;
	int retval;

	/* Request the SMBUS semaphore, avoid conflicts with the IMC */
	smbslvcnt = inb_p(SMBSLVCNT);
	do {
		outb_p(smbslvcnt | 0x10, SMBSLVCNT);

		/* Check the semaphore status */
		smbslvcnt = inb_p(SMBSLVCNT);
		if (smbslvcnt & 0x10)
			break;

		usleep_range(1000, 2000);
	} while (--retries);
	/* SMBus is still owned by the IMC, we give up */
	if (!retries)
		return -EBUSY;

	mutex_lock(&piix4_mutex_sb800);

	outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);

@@ -606,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,

	mutex_unlock(&piix4_mutex_sb800);

	/* Release the semaphore */
	outb_p(smbslvcnt | 0x20, SMBSLVCNT);

	return retval;
}

@@ -931,7 +931,10 @@ static int i2c_device_probe(struct device *dev)
	if (!client->irq) {
		int irq = -ENOENT;

		if (dev->of_node) {
		if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
			dev_dbg(dev, "Using Host Notify IRQ\n");
			irq = i2c_smbus_host_notify_to_irq(client);
		} else if (dev->of_node) {
			irq = of_irq_get_byname(dev->of_node, "irq");
			if (irq == -EINVAL || irq == -ENODATA)
				irq = of_irq_get(dev->of_node, 0);

@@ -940,14 +943,7 @@ static int i2c_device_probe(struct device *dev)
		}
		if (irq == -EPROBE_DEFER)
			return irq;
		/*
		 * ACPI and OF did not find any useful IRQ, try to see
		 * if Host Notify can be used.
		 */
		if (irq < 0) {
			dev_dbg(dev, "Using Host Notify IRQ\n");
			irq = i2c_smbus_host_notify_to_irq(client);
		}

		if (irq < 0)
			irq = 0;

@@ -1708,7 +1704,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,

	if (i2c_check_addr_validity(addr, info.flags)) {
		dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
			info.addr, node->full_name);
			addr, node->full_name);
		return ERR_PTR(-EINVAL);
	}

@@ -1716,6 +1712,9 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
	info.of_node = of_node_get(node);
	info.archdata = &dev_ad;

	if (of_property_read_bool(node, "host-notify"))
		info.flags |= I2C_CLIENT_HOST_NOTIFY;

	if (of_get_property(node, "wakeup-source", NULL))
		info.flags |= I2C_CLIENT_WAKE;

@@ -3633,7 +3632,7 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
	int ret;

	if (!client || !slave_cb) {
		WARN(1, "insufficent data\n");
		WARN(1, "insufficient data\n");
		return -EINVAL;
	}

@@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
		unsigned long arg)
{
	struct i2c_smbus_ioctl_data data_arg;
	union i2c_smbus_data temp;
	union i2c_smbus_data temp = {};
	int datasize, res;

	if (copy_from_user(&data_arg,

@@ -22,7 +22,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/init.h>

@@ -1377,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
	input_dev->name = xpad->name;
	input_dev->phys = xpad->phys;
	usb_to_input_id(xpad->udev, &input_dev->id);

	if (xpad->xtype == XTYPE_XBOX360W) {
		/* x360w controllers and the receiver have different ids */
		input_dev->id.product = 0x02a1;
	}

	input_dev->dev.parent = &xpad->intf->dev;

	input_set_drvdata(input_dev, xpad);

@@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = {

MODULE_DEVICE_TABLE(i2c, adxl34x_id);

#ifdef CONFIG_OF
static const struct of_device_id adxl34x_of_id[] = {
/*
 * The ADXL346 is backward-compatible with the ADXL345. Differences are

@@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = {
};

MODULE_DEVICE_TABLE(of, adxl34x_of_id);
#endif

static struct i2c_driver adxl34x_driver = {
	.driver = {
		.name = "adxl34x",
		.pm = &adxl34x_i2c_pm,
		.of_match_table = of_match_ptr(adxl34x_of_id),
		.of_match_table = adxl34x_of_id,
	},
	.probe = adxl34x_i2c_probe,
	.remove = adxl34x_i2c_remove,

@@ -114,7 +114,7 @@ enum SS4_PACKET_ID {
	 (_b[1] & 0x7F)		\
	)

#define SS4_TS_Y_V2(_b)		(s8)(	\
#define SS4_TS_Y_V2(_b)		-(s8)(	\
	 ((_b[3] & 0x01) << 7) |	\
	 (_b[2] & 0x7F)		\
	)

@@ -29,7 +29,7 @@
 * after soft reset, we should wait for 1 ms
 * before the device becomes operational
 */
#define SOFT_RESET_DELAY_MS	3
#define SOFT_RESET_DELAY_US	3000
/* and after hard reset, we should wait for max 500ms */
#define HARD_RESET_DELAY_MS	500

@@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client)
	if (ret) {
		dev_err(&client->dev, "Unable to reset device\n");
	} else {
		msleep(SOFT_RESET_DELAY_MS);
		usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
		ret = synaptics_i2c_config(client);
		if (ret)
			dev_err(&client->dev, "Unable to config device\n");

@@ -41,7 +41,8 @@ config RMI4_SMB

config RMI4_F03
	bool "RMI4 Function 03 (PS2 Guest)"
	depends on RMI4_CORE && SERIO
	depends on RMI4_CORE
	depends on SERIO=y || RMI4_CORE=SERIO
	help
	  Say Y here if you want to add support for RMI4 function 03.

@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
			DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
			DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
		},
	},
	{ }
};

@@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)

	case QUEUE_HEADER_NORMAL:
		report_count = ts->buf[FW_HDR_COUNT];
		if (report_count > 3) {
		if (report_count == 0 || report_count > 3) {
			dev_err(&client->dev,
				"too large report count: %*ph\n",
				"bad report count: %*ph\n",
				HEADER_SIZE, ts->buf);
			break;
		}

@@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			   int is_new);
struct md_cluster_info;

/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
	MD_CLOSING,		/* If set, we are closing the array, do not open

@@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
	unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}
#endif /* _MD_MD_H */

@@ -26,6 +26,11 @@
#include "raid0.h"
#include "raid5.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED))

static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;

@@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	clear_bit(MD_HAS_JOURNAL, &mddev->flags);
	clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

@@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;

@@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;

@@ -42,6 +42,10 @@
#include "raid1.h"
#include "bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN))

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */

@@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
	kfree(plug);
}

static void raid1_make_request(struct mddev *mddev, struct bio * bio)
static void raid1_read_request(struct mddev *mddev, struct bio *bio,
				 struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct r1bio *r1_bio;
	struct bio *read_bio;
	struct bitmap *bitmap = mddev->bitmap;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int sectors_handled;
	int max_sectors;
	int rdisk;

	wait_barrier(conf, bio);

read_again:
	rdisk = read_balance(conf, r1_bio, &max_sectors);

	if (rdisk < 0) {
		/* couldn't find anywhere to read from */
		raid_end_bio_io(r1_bio);
		return;
	}
	mirror = conf->mirrors + rdisk;

	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
	    bitmap) {
		/*
		 * Reading from a write-mostly device must take care not to
		 * over-take any writes that are 'behind'
		 */
		raid1_log(mddev, "wait behind writes");
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}
	r1_bio->read_disk = rdisk;
	r1_bio->start_next_window = 0;

	read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
	bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
		 max_sectors);

	r1_bio->bios[rdisk] = read_bio;

	read_bio->bi_iter.bi_sector = r1_bio->sector +
		mirror->rdev->data_offset;
	read_bio->bi_bdev = mirror->rdev->bdev;
	read_bio->bi_end_io = raid1_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &mirror->rdev->flags) &&
	    test_bit(R1BIO_FailFast, &r1_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r1_bio;

	if (mddev->gendisk)
		trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
				      read_bio, disk_devt(mddev->gendisk),
				      r1_bio->sector);

	if (max_sectors < r1_bio->sectors) {
		/*
		 * could not read all from this device, so we will need another
		 * r1_bio.
		 */
		sectors_handled = (r1_bio->sector + max_sectors
				   - bio->bi_iter.bi_sector);
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * Cannot call generic_make_request directly as that will be
		 * queued in __make_request and subsequent mempool_alloc might
		 * block waiting for it.  So hand bio over to raid1d.
		 */
		reschedule_retry(r1_bio);

		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

		r1_bio->master_bio = bio;
		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
		r1_bio->state = 0;
		r1_bio->mddev = mddev;
		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
		goto read_again;
	} else
		generic_make_request(read_bio);
}

static void raid1_write_request(struct mddev *mddev, struct bio *bio,
				struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i, disks;
	struct bitmap *bitmap;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	const int op = bio_op(bio);
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_opf &
						(REQ_PREFLUSH | REQ_FUA));

@@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    ((bio_end_sector(bio) > mddev->suspend_lo &&
	if ((bio_end_sector(bio) > mddev->suspend_lo &&
	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
	    (mddev_is_clustered(mddev) &&
	     md_cluster_ops->area_resyncing(mddev, WRITE,
		     bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		     bio->bi_iter.bi_sector, bio_end_sector(bio)))) {

		/*
		 * As the suspend_* range is controlled by userspace, we want
		 * an interruptible wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {

@@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
			    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
			    (mddev_is_clustered(mddev) &&
			     !md_cluster_ops->area_resyncing(mddev, WRITE,
				     bio->bi_iter.bi_sector, bio_end_sector(bio))))
				     bio->bi_iter.bi_sector,
				     bio_end_sector(bio))))
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	start_next_window = wait_barrier(conf, bio);

	bitmap = mddev->bitmap;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	bio_clear_flag(bio, BIO_SEG_VALID);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk;

read_again:
		rdisk = read_balance(conf, r1_bio, &max_sectors);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			raid1_log(mddev, "wait behind writes");
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;
		r1_bio->start_next_window = 0;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_iter.bi_sector = r1_bio->sector +
			mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		bio_set_op_attrs(read_bio, op, do_sync);
		if (test_bit(FailFast, &mirror->rdev->flags) &&
		    test_bit(R1BIO_FailFast, &r1_bio->state))
			read_bio->bi_opf |= MD_FAILFAST;
		read_bio->bi_private = r1_bio;

		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
					      read_bio, disk_devt(mddev->gendisk),
					      r1_bio->sector);

		if (max_sectors < r1_bio->sectors) {
			/* could not read all from this device, so we will
			 * need another r1_bio.
			 */

			sectors_handled = (r1_bio->sector + max_sectors
					   - bio->bi_iter.bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __make_request
			 * and subsequent mempool_alloc might block waiting
			 * for it.  So hand bio over to raid1d.
			 */
			reschedule_retry(r1_bio);

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = bio;
			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
			r1_bio->state = 0;
			r1_bio->mddev = mddev;
			r1_bio->sector = bio->bi_iter.bi_sector +
				sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid1_log(mddev, "wait queued");

@@ -1280,8 +1261,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector,
					     max_sectors,
			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is

@@ -1370,7 +1350,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);

		if (first_clone) {
			/* do behind I/O ?

@@ -1464,6 +1445,40 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
	wake_up(&conf->wait_barrier);
}

static void raid1_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	/*
	 * make_request() can abort the operation when read-ahead is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;

	/*
	 * We might need to issue multiple reads to different devices if there
	 * are bad blocks around, so we keep track of the number of reads in
	 * bio->bi_phys_segments.  If this is 0, there is only one r1_bio and
	 * no locking will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	bio_clear_flag(bio, BIO_SEG_VALID);

	if (bio_data_dir(bio) == READ)
		raid1_read_request(mddev, bio, r1_bio);
	else
		raid1_write_request(mddev, bio, r1_bio);
}

static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;

@@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev)
		if (!IS_ERR(conf)) {
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
			clear_bit(MD_HAS_JOURNAL, &mddev->flags);
			clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
			mddev_clear_unsupported_flags(mddev,
				UNSUPPORTED_MDDEV_FLAGS);
		}
		return conf;
	}

@@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
	kfree(plug);
}

static void __make_request(struct mddev *mddev, struct bio *bio)
static void raid10_read_request(struct mddev *mddev, struct bio *bio,
				struct r10bio *r10_bio)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	struct bio *read_bio;
	const int op = bio_op(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	int sectors_handled;
	int max_sectors;
	sector_t sectors;
	struct md_rdev *rdev;
	int slot;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf);

	sectors = bio_sectors(bio);
	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio->bi_iter.bi_sector < conf->reshape_progress &&
	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
		/*
		 * IO spans the reshape position.  Need to wait for reshape to
		 * pass
		 */
		raid10_log(conf->mddev, "wait reshape");
		allow_barrier(conf);
		wait_event(conf->wait_barrier,
			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
			   conf->reshape_progress >= bio->bi_iter.bi_sector +
			   sectors);
		wait_barrier(conf);
	}

read_again:
	rdev = read_balance(conf, r10_bio, &max_sectors);
	if (!rdev) {
		raid_end_bio_io(r10_bio);
		return;
	}
	slot = r10_bio->read_slot;

	read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
	bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
		 max_sectors);

	r10_bio->devs[slot].bio = read_bio;
	r10_bio->devs[slot].rdev = rdev;

	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
		choose_data_offset(r10_bio, rdev);
	read_bio->bi_bdev = rdev->bdev;
	read_bio->bi_end_io = raid10_end_read_request;
	bio_set_op_attrs(read_bio, op, do_sync);
	if (test_bit(FailFast, &rdev->flags) &&
	    test_bit(R10BIO_FailFast, &r10_bio->state))
		read_bio->bi_opf |= MD_FAILFAST;
	read_bio->bi_private = r10_bio;

	if (mddev->gendisk)
		trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
				      read_bio, disk_devt(mddev->gendisk),
				      r10_bio->sector);
	if (max_sectors < r10_bio->sectors) {
		/*
		 * Could not read all from this device, so we will need another
		 * r10_bio.
		 */
		sectors_handled = (r10_bio->sector + max_sectors
				   - bio->bi_iter.bi_sector);
		r10_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
		/*
		 * Cannot call generic_make_request directly as that will be
		 * queued in __generic_make_request and subsequent
		 * mempool_alloc might block waiting for it.  so hand bio over
		 * to raid10d.
		 */
		reschedule_retry(r10_bio);

		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

		r10_bio->master_bio = bio;
		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
		r10_bio->state = 0;
		r10_bio->mddev = mddev;
		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
		goto read_again;
	} else
		generic_make_request(read_bio);
	return;
}

static void raid10_write_request(struct mddev *mddev, struct bio *bio,
				 struct r10bio *r10_bio)
{
	struct r10conf *conf = mddev->private;
	int i;
	const int op = bio_op(bio);
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid10_plug_cb *plug = NULL;
	sector_t sectors;
	int sectors_handled;
	int max_sectors;
	int sectors;

	md_write_start(mddev, bio);

@@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio->bi_iter.bi_sector < conf->reshape_progress &&
	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
		/* IO spans the reshape position.  Need to wait for
		 * reshape to pass
		/*
		 * IO spans the reshape position.  Need to wait for reshape to
		 * pass
		 */
		raid10_log(conf->mddev, "wait reshape");
		allow_barrier(conf);

@@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
			   sectors);
		wait_barrier(conf);
	}

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    bio_data_dir(bio) == WRITE &&
	    (mddev->reshape_backwards
	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)

@@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
		conf->reshape_safe = mddev->reshape_position;
	}

	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = sectors;

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;
	r10_bio->state = 0;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r10_bio and no locking
	 * will be needed when the request completes.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	bio_clear_flag(bio, BIO_SEG_VALID);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		struct md_rdev *rdev;
		int slot;

read_again:
		rdev = read_balance(conf, r10_bio, &max_sectors);
		if (!rdev) {
			raid_end_bio_io(r10_bio);
			return;
		}
		slot = r10_bio->read_slot;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);

		r10_bio->devs[slot].bio = read_bio;
		r10_bio->devs[slot].rdev = rdev;

		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
			choose_data_offset(r10_bio, rdev);
		read_bio->bi_bdev = rdev->bdev;
		read_bio->bi_end_io = raid10_end_read_request;
		bio_set_op_attrs(read_bio, op, do_sync);
		if (test_bit(FailFast, &rdev->flags) &&
		    test_bit(R10BIO_FailFast, &r10_bio->state))
			read_bio->bi_opf |= MD_FAILFAST;
		read_bio->bi_private = r10_bio;

		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
					      read_bio, disk_devt(mddev->gendisk),
					      r10_bio->sector);
		if (max_sectors < r10_bio->sectors) {
			/* Could not read all from this device, so we will
			 * need another r10_bio.
			 */
			sectors_handled = (r10_bio->sector + max_sectors
					   - bio->bi_iter.bi_sector);
			r10_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __generic_make_request
			 * and subsequent mempool_alloc might block
			 * waiting for it.  so hand bio over to raid10d.
			 */
			reschedule_retry(r10_bio);

			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

			r10_bio->master_bio = bio;
			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
			r10_bio->state = 0;
			r10_bio->mddev = mddev;
			r10_bio->sector = bio->bi_iter.bi_sector +
				sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		raid10_log(mddev, "wait queued");

@@ -1300,8 +1308,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, dev_sector,
					     max_sectors,
			is_bad = is_badblock(rdev, dev_sector, max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* Mustn't write here until the bad block

@@ -1405,8 +1412,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
			r10_bio->devs[i].bio = mbio;

			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+
					   choose_data_offset(r10_bio,
							      rdev));
					   choose_data_offset(r10_bio, rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io	= raid10_end_write_request;
			bio_set_op_attrs(mbio, op, do_sync | do_fua);

@@ -1457,8 +1463,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
			r10_bio->devs[i].repl_bio = mbio;

			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +
					   choose_data_offset(
						r10_bio, rdev));
					   choose_data_offset(r10_bio, rdev));
			mbio->bi_bdev = rdev->bdev;
			mbio->bi_end_io	= raid10_end_write_request;
			bio_set_op_attrs(mbio, op, do_sync | do_fua);

@@ -1503,6 +1508,36 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
	one_write_done(r10_bio);
}

static void __make_request(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;

	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = bio_sectors(bio);

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;
	r10_bio->state = 0;

	/*
	 * We might need to issue multiple reads to different devices if there
	 * are bad blocks around, so we keep track of the number of reads in
	 * bio->bi_phys_segments.  If this is 0, there is only one r10_bio and
	 * no locking will be needed when the request completes.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	bio_clear_flag(bio, BIO_SEG_VALID);

	if (bio_data_dir(bio) == READ)
		raid10_read_request(mddev, bio, r10_bio);
	else
		raid10_write_request(mddev, bio, r10_bio);
}

static void raid10_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;

@@ -1682,8 +1682,7 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,

static struct stripe_head *
r5c_recovery_alloc_stripe(struct r5conf *conf,
			  sector_t stripe_sect,
			  sector_t log_start)
			  sector_t stripe_sect)
{
	struct stripe_head *sh;

@@ -1692,7 +1691,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf,
		return NULL;  /* no more stripe available */

	r5l_recovery_reset_stripe(sh);
	sh->log_start = log_start;

	return sh;
}

@@ -1862,7 +1860,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
						stripe_sect);

		if (!sh) {
			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
			/*
			 * cannot get stripe from raid5_get_active_stripe
			 * try replay some stripes

@@ -1871,7 +1869,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
				r5c_recovery_replay_stripes(
					cached_stripe_list, ctx);
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect, ctx->pos);
					conf, stripe_sect);
			}
			if (!sh) {
				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",

@@ -1879,8 +1877,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
					 conf->min_nr_stripes * 2);
				raid5_set_cache_size(mddev,
						     conf->min_nr_stripes * 2);
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect, ctx->pos);
				sh = r5c_recovery_alloc_stripe(conf,
							       stripe_sect);
			}
			if (!sh) {
				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",

@@ -1894,7 +1892,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
				r5l_recovery_replay_one_stripe(conf, sh, ctx);
				sh->log_start = ctx->pos;
				list_move_tail(&sh->lru, cached_stripe_list);
			}
			r5l_recovery_load_data(log, sh, ctx, payload,

@@ -1933,8 +1930,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log,
			set_bit(R5_UPTODATE, &dev->flags);
		}
	}
	list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
	atomic_inc(&log->stripe_in_journal_count);
}

/*

@@ -2070,6 +2065,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
	struct stripe_head *sh, *next;
	struct mddev *mddev = log->rdev->mddev;
	struct page *page;
	sector_t next_checkpoint = MaxSector;

	page = alloc_page(GFP_KERNEL);
	if (!page) {

@@ -2078,6 +2074,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
		return -ENOMEM;
	}

	WARN_ON(list_empty(&ctx->cached_list));

	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
		struct r5l_meta_block *mb;
		int i;

@@ -2123,12 +2121,15 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
			     REQ_OP_WRITE, REQ_FUA, false);
		sh->log_start = ctx->pos;
		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
		atomic_inc(&log->stripe_in_journal_count);
		ctx->pos = write_pos;
		ctx->seq += 1;

		next_checkpoint = sh->log_start;
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}
	log->next_checkpoint = next_checkpoint;
	__free_page(page);
	return 0;
}

@@ -2139,7 +2140,6 @@ static int r5l_recovery_log(struct r5l_log *log)
	struct r5l_recovery_ctx ctx;
	int ret;
	sector_t pos;
	struct stripe_head *sh;

	ctx.pos = log->last_checkpoint;
	ctx.seq = log->last_cp_seq;

@@ -2164,16 +2164,13 @@ static int r5l_recovery_log(struct r5l_log *log)
		log->next_checkpoint = ctx.pos;
		r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
		ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
	} else {
		sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
		log->next_checkpoint = sh->log_start;
	}

	if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
		pr_debug("md/raid:%s: starting from clean shutdown\n",
			 mdname(mddev));
	else {
		pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
		pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
			 mdname(mddev), ctx.data_only_stripes,
			 ctx.data_parity_stripes);

@@ -2418,9 +2415,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
	if (do_wakeup)
		wake_up(&conf->wait_for_overlap);

	if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;

	spin_lock_irq(&conf->log->stripe_in_journal_lock);
	list_del_init(&sh->r5c);
	spin_unlock_irq(&conf->log->stripe_in_journal_lock);

@@ -2639,14 +2633,16 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
	spin_lock_init(&log->stripe_in_journal_lock);
	atomic_set(&log->stripe_in_journal_count, 0);

	rcu_assign_pointer(conf->log, log);

	if (r5l_load_log(log))
		goto error;

	rcu_assign_pointer(conf->log, log);
	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

error:
	rcu_assign_pointer(conf->log, NULL);
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	mempool_destroy(log->meta_pool);

@@ -62,6 +62,8 @@
#include "raid0.h"
#include "bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS	(1L << MD_FAILFAST_SUPPORTED)

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE

@@ -7829,8 +7831,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
	mddev->new_chunk_sectors = chunksect;

	ret = setup_conf(mddev);
	if (!IS_ERR_VALUE(ret))
		clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
	if (!IS_ERR(ret))
		mddev_clear_unsupported_flags(mddev,
			UNSUPPORTED_MDDEV_FLAGS);
	return ret;
}

@@ -152,6 +152,9 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
	int ret;

	if (!cldev->bus->hbm_f_os_supported)
		return;

	ret = mei_cldev_enable(cldev);
	if (ret)
		return;

@@ -180,6 +180,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
				 dev->hbm_f_ev_supported);
		pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
				 dev->hbm_f_fa_supported);
		pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
				 dev->hbm_f_os_supported);
	}

	pos += scnprintf(buf + pos, bufsz - pos, "pg:  %s, %s\n",

@@ -989,6 +989,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
	/* Fixed Address Client Support */
	if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
		dev->hbm_f_fa_supported = 1;

	/* OS ver message Support */
	if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
		dev->hbm_f_os_supported = 1;
}

/**

@@ -76,6 +76,12 @@
#define HBM_MINOR_VERSION_FA               0
#define HBM_MAJOR_VERSION_FA               2

/*
 * MEI version with OS ver message support
 */
#define HBM_MINOR_VERSION_OS               0
#define HBM_MAJOR_VERSION_OS               2

/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK                  0x7f
/* Host bus message command RESPONSE */

@@ -406,6 +406,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
 * @hbm_f_ev_supported  : hbm feature event notification
 * @hbm_f_fa_supported  : hbm feature fixed address client
 * @hbm_f_ie_supported  : hbm feature immediate reply to enum request
 * @hbm_f_os_supported  : hbm feature support OS ver message
 *
 * @me_clients_rwsem: rw lock over me_clients list
 * @me_clients  : list of FW clients

@@ -487,6 +488,7 @@ struct mei_device {
	unsigned int hbm_f_ev_supported:1;
	unsigned int hbm_f_fa_supported:1;
	unsigned int hbm_f_ie_supported:1;
	unsigned int hbm_f_os_supported:1;

	struct rw_semaphore me_clients_rwsem;
	struct list_head me_clients;

@@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		}
	} while (busy);

	if (host->ops->card_busy && send_status)
		return mmc_switch_status(card);

	return 0;
}

@@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
	if (!use_busy_signal)
		goto out;

	/* Switch to new timing before poll and check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host)) {
		if (send_status)
			err = mmc_switch_status(card);
		mmc_host_is_spi(host))
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	if (err && timing)
		mmc_set_timing(host, old_timing);
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

@@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
    struct meson_host *host = dev_id;
    struct mmc_request *mrq;
    struct mmc_command *cmd = host->cmd;
    struct mmc_command *cmd;
    u32 irq_en, status, raw_status;
    irqreturn_t ret = IRQ_HANDLED;

    if (WARN_ON(!host))
        return IRQ_NONE;

    cmd = host->cmd;

    mrq = host->mrq;

    if (WARN_ON(!mrq))
@@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
    int ret = IRQ_HANDLED;

    if (WARN_ON(!mrq))
        ret = IRQ_NONE;
        return IRQ_NONE;

    if (WARN_ON(!cmd))
        ret = IRQ_NONE;
        return IRQ_NONE;

    data = cmd->data;
    if (data) {
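The two WARN_ON fixes above replace an assignment to ret with an early return; in the old code the handler kept going after the warning and could still dereference a NULL cmd at data = cmd->data. A minimal user-space sketch of the same guard-clause pattern follows; the struct and function names are invented for illustration and are not the driver's API.

    #include <stdio.h>
    #include <stddef.h>

    struct cmd { int data; };

    /* Old style: only records the error, then falls through to the dereference. */
    static int handle_broken(struct cmd *cmd)
    {
        int ret = 0;

        if (cmd == NULL)
            ret = -1;            /* execution continues... */

        return cmd->data + ret;  /* NULL dereference when cmd == NULL */
    }

    /* Fixed style, as in the hunk: bail out before touching cmd. */
    static int handle_fixed(struct cmd *cmd)
    {
        if (cmd == NULL)
            return -1;           /* early return, nothing below runs */

        return cmd->data;
    }

    int main(void)
    {
        struct cmd c = { .data = 42 };

        printf("%d\n", handle_broken(&c));   /* 42; calling it with NULL would crash */
        printf("%d\n", handle_fixed(&c));    /* 42 */
        printf("%d\n", handle_fixed(NULL));  /* -1, no crash */
        return 0;
    }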
@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
    cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
    cmd1 = cmd->arg;

    if (cmd->opcode == MMC_STOP_TRANSMISSION)
        cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

    if (host->sdio_irq_en) {
        ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
        cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
               ssp->base + HW_SSP_BLOCK_SIZE);
    }

    if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
        (cmd->opcode == SD_IO_RW_EXTENDED))
    if (cmd->opcode == SD_IO_RW_EXTENDED)
        cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

    cmd1 = cmd->arg;
@@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
    /* Power on the SDHCI controller and its children */
    acpi_device_fix_up_power(device);
    list_for_each_entry(child, &device->children, node)
        acpi_device_fix_up_power(child);
        if (child->status.present && child->status.enabled)
            acpi_device_fix_up_power(child);

    if (acpi_bus_get_status(device) || !device->status.present)
        return -ENODEV;
@@ -426,6 +426,7 @@ config MTD_NAND_ORION

config MTD_NAND_OXNAS
    tristate "NAND Flash support for Oxford Semiconductor SoC"
    depends on HAS_IOMEM
    help
      This enables the NAND flash controller on Oxford Semiconductor SoCs.
@@ -540,7 +541,7 @@ config MTD_NAND_FSMC
      Flexible Static Memory Controller (FSMC)

config MTD_NAND_XWAY
    tristate "Support for NAND on Lantiq XWAY SoC"
    bool "Support for NAND on Lantiq XWAY SoC"
    depends on LANTIQ && SOC_TYPE_XWAY
    help
      Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
@@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
    init_completion(&host->comp_controller);

    host->irq = platform_get_irq(pdev, 0);
    if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
    if (host->irq < 0) {
        dev_err(&pdev->dev, "failed to get platform irq\n");
        res = -EINVAL;
        goto err_exit3;
@@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev)
    if (IS_ERR(nfc->pbus_base))
        return PTR_ERR(nfc->pbus_base);

    writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);

    clk = clk_get(&pdev->dev, NULL);
    if (IS_ERR(clk))
        return PTR_ERR(clk);

    nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox");
    nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
    if (IS_ERR(nfc->chan))
        return PTR_ERR(nfc->chan);
@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
    { .compatible = "lantiq,nand-xway" },
    {},
};
MODULE_DEVICE_TABLE(of, xway_nand_match);

static struct platform_driver xway_nand_driver = {
    .probe = xway_nand_probe,
@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
    },
};

module_platform_driver(xway_nand_driver);

MODULE_LICENSE("GPL");
builtin_platform_driver(xway_nand_driver);
@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
    unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
    unsigned int pkts_compl = 0, bytes_compl = 0;
    struct bcm_sysport_cb *cb;
    struct netdev_queue *txq;
    u32 hw_ind;

    txq = netdev_get_tx_queue(ndev, ring->index);

    /* Compute how many descriptors have been processed since last call */
    hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
    c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,

    ring->c_index = c_index;

    if (netif_tx_queue_stopped(txq) && pkts_compl)
        netif_tx_wake_queue(txq);

    netif_dbg(priv, tx_done, ndev,
              "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
              ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                           struct bcm_sysport_tx_ring *ring)
{
    struct netdev_queue *txq;
    unsigned int released;
    unsigned long flags;

    txq = netdev_get_tx_queue(priv->netdev, ring->index);

    spin_lock_irqsave(&ring->lock, flags);
    released = __bcm_sysport_tx_reclaim(priv, ring);
    if (released)
        netif_tx_wake_queue(txq);

    spin_unlock_irqrestore(&ring->lock, flags);

    return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
                                 struct bcm_sysport_tx_ring *ring)
{
    unsigned long flags;

    spin_lock_irqsave(&ring->lock, flags);
    __bcm_sysport_tx_reclaim(priv, ring);
    spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
    struct bcm_sysport_tx_ring *ring =
|
|||
napi_disable(&ring->napi);
|
||||
netif_napi_del(&ring->napi);
|
||||
|
||||
bcm_sysport_tx_reclaim(priv, ring);
|
||||
bcm_sysport_tx_clean(priv, ring);
|
||||
|
||||
kfree(ring->cbs);
|
||||
ring->cbs = NULL;
|
||||
|
|
|
@ -47,8 +47,9 @@ struct lmac {
|
|||
struct bgx {
|
||||
u8 bgx_id;
|
||||
struct lmac lmac[MAX_LMAC_PER_BGX];
|
||||
int lmac_count;
|
||||
u8 lmac_count;
|
||||
u8 max_lmac;
|
||||
u8 acpi_lmac_idx;
|
||||
void __iomem *reg_base;
|
||||
struct pci_dev *pdev;
|
||||
bool is_dlm;
|
||||
|
@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
|
|||
if (acpi_bus_get_device(handle, &adev))
|
||||
goto out;
|
||||
|
||||
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
|
||||
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
|
||||
|
||||
SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
|
||||
SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
|
||||
|
||||
bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
|
||||
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
|
||||
bgx->acpi_lmac_idx++; /* move to next LMAC */
|
||||
out:
|
||||
bgx->lmac_count++;
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
|
|||
err:
|
||||
mutex_unlock(&adapter->mcc_lock);
|
||||
|
||||
if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
|
||||
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
|
||||
status = -EPERM;
|
||||
|
||||
return status;
|
||||
|
|
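The benet change above compares the decoded base status rather than the raw completion word. Assuming base_status() extracts the base completion code from a value that also carries additional status bits (the field layout and the numeric code below are invented for illustration), the difference looks like this:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical layout: low 16 bits = base status, high 16 bits = extra info. */
    #define BASE_STATUS_MASK    0xFFFFu
    #define STATUS_UNAUTHORIZED 0x001Fu   /* made-up code for the example */

    static uint16_t base_status(uint32_t compl_status)
    {
        return (uint16_t)(compl_status & BASE_STATUS_MASK);
    }

    int main(void)
    {
        /* Completion with extra bits set in the high half. */
        uint32_t compl_status = 0x0003001Fu;

        /* Raw comparison misses the match because of the extra bits. */
        printf("raw    : %s\n",
               compl_status == STATUS_UNAUTHORIZED ? "unauthorized" : "no match");

        /* Comparing the decoded base status catches it. */
        printf("decoded: %s\n",
               base_status(compl_status) == STATUS_UNAUTHORIZED ? "unauthorized" : "no match");
        return 0;
    }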
Some files were not shown because too many files have changed in this diff.