mirror of https://gitee.com/openkylin/linux.git
commit 64b7aad579
Merge branch 'sched/urgent' into sched/core, to pick up fixes before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
 Required properties:
 - compatible: Should be "mediatek,mt7623-eth"
 - reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engines interrupts in numeric
+  order. These are fe_int0, fe_int1 and fe_int2.
 - clocks: the clock used by the core
 - clock-names: the names of the clock listed in the clocks property. These are
   "ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
 		 <&ethsys CLK_ETHSYS_GP2>,
 		 <&ethsys CLK_ETHSYS_GP1>;
 	clock-names = "ethif", "esw", "gp2", "gp1";
-	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>;
+	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
+		      GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
+		      GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
 	power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
 	resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
 	reset-names = "eth";
@@ -8,15 +8,19 @@ Required properties:
    of memory mapped region.
 - clock-names: from common clock binding:
 	Required elements: "24m"
-- rockchip,grf: phandle to the syscon managing the "general register files"
 - #phy-cells : from the generic PHY bindings, must be 0;
 
 Example:
 
-edp_phy: edp-phy {
-	compatible = "rockchip,rk3288-dp-phy";
-	rockchip,grf = <&grf>;
-	clocks = <&cru SCLK_EDP_24M>;
-	clock-names = "24m";
-	#phy-cells = <0>;
+grf: syscon@ff770000 {
+	compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
+
+	...
+
+	edp_phy: edp-phy {
+		compatible = "rockchip,rk3288-dp-phy";
+		clocks = <&cru SCLK_EDP_24M>;
+		clock-names = "24m";
+		#phy-cells = <0>;
+	};
 };
@@ -3,17 +3,23 @@ Rockchip EMMC PHY
 
 Required properties:
  - compatible: rockchip,rk3399-emmc-phy
- - rockchip,grf : phandle to the syscon managing the "general
-   register files"
  - #phy-cells: must be 0
- - reg: PHY configure reg address offset in "general
+ - reg: PHY register address offset and length in "general
    register files"
 
 Example:
 
-emmcphy: phy {
-	compatible = "rockchip,rk3399-emmc-phy";
-	rockchip,grf = <&grf>;
-	reg = <0xf780>;
-	#phy-cells = <0>;
+
+grf: syscon@ff770000 {
+	compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	...
+
+	emmcphy: phy@f780 {
+		compatible = "rockchip,rk3399-emmc-phy";
+		reg = <0xf780 0x20>;
+		#phy-cells = <0>;
+	};
 };
@@ -15,9 +15,10 @@ Required properties:
   is the rtc tick interrupt. The number of cells representing a interrupt
   depends on the parent interrupt controller.
 - clocks: Must contain a list of phandle and clock specifier for the rtc
-          and source clocks.
-- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
-               same order as the clocks property.
+          clock and in the case of a s3c6410 compatible controller, also
+          a source clock.
+- clock-names: Must contain "rtc" and for a s3c6410 compatible controller,
+               a "rtc_src" sorted in the same order as the clocks property.
 
 Example:
 
@@ -173,6 +173,10 @@ A few EV_ABS codes have special meanings:
     proximity of the device and while the value of the BTN_TOUCH code is 0. If
     the input device may be used freely in three dimensions, consider ABS_Z
     instead.
+  - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable
+    proximity and set to 0 when the tool leaves detectable proximity.
+    BTN_TOOL_<name> signals the type of tool that is currently detected by the
+    hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH.
 
 * ABS_MT_<name>:
   - Used to describe multitouch input events. Please see
@@ -19,7 +19,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
@@ -31,8 +31,8 @@ vmalloc space is lazily synchronized into the different PML4 pages of
 the processes using the page fault handler, with init_level4_pgt as
 reference.
 
-Current X86-64 implementations only support 40 bits of address space,
-but we support up to 46 bits. This expands into MBZ space in the page tables.
+Current X86-64 implementations support up to 46 bits of address space (64 TB),
+which is our current limit. This expands into MBZ space in the page tables.
 
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
@@ -1008,7 +1008,8 @@ prepare0: archprepare FORCE
 prepare: prepare0 prepare-objtool
 
 ifdef CONFIG_STACK_VALIDATION
-  has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0)
+  has_libelf := $(call try-run,\
+		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
@@ -276,7 +276,7 @@ static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
 	int feature = (features >> field) & 15;
 
 	/* feature registers are signed values */
-	if (feature > 8)
+	if (feature > 7)
 		feature -= 16;
 
 	return feature;
@@ -512,7 +512,7 @@ static void __init elf_hwcap_fixup(void)
 	 */
 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
-	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
+	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
 		elf_hwcap &= ~HWCAP_SWP;
 }
 
@@ -762,7 +762,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (!mask)
 		return NULL;
 
-	buf = kzalloc(sizeof(*buf), gfp);
+	buf = kzalloc(sizeof(*buf),
+		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
 	if (!buf)
 		return NULL;
 
@@ -588,6 +588,15 @@ set_hcr:
 	msr	vpidr_el2, x0
 	msr	vmpidr_el2, x1
 
+	/*
+	 * When VHE is not in use, early init of EL2 and EL1 needs to be
+	 * done here.
+	 * When VHE _is_ in use, EL1 will not be used in the host and
+	 * requires no configuration, and all non-hyp-specific EL2 setup
+	 * will be done via the _EL1 system register aliases in __cpu_setup.
+	 */
+	cbnz	x2, 1f
+
 	/* sctlr_el1 */
 	mov	x0, #0x0800			// Set/clear RES{1,0} bits
 CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
@@ -597,6 +606,7 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
 	/* Coprocessor traps. */
 	mov	x0, #0x33ff
 	msr	cptr_el2, x0			// Disable copro. traps to EL2
+1:
 
 #ifdef CONFIG_COMPAT
 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
|
||||||
|
|
||||||
.macro update_early_cpu_boot_status status, tmp1, tmp2
|
.macro update_early_cpu_boot_status status, tmp1, tmp2
|
||||||
mov \tmp2, #\status
|
mov \tmp2, #\status
|
||||||
str_l \tmp2, __early_cpu_boot_status, \tmp1
|
adr_l \tmp1, __early_cpu_boot_status
|
||||||
|
str \tmp2, [\tmp1]
|
||||||
dmb sy
|
dmb sy
|
||||||
dc ivac, \tmp1 // Invalidate potentially stale cache line
|
dc ivac, \tmp1 // Invalidate potentially stale cache line
|
||||||
.endm
|
.endm
|
||||||
|
|
|
@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
 static int smp_spin_table_cpu_init(unsigned int cpu)
 {
 	struct device_node *dn;
+	int ret;
 
 	dn = of_get_cpu_node(cpu, NULL);
 	if (!dn)
@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
 	/*
 	 * Determine the address from which the CPU is polling.
 	 */
-	if (of_property_read_u64(dn, "cpu-release-addr",
-				 &cpu_release_addr[cpu])) {
+	ret = of_property_read_u64(dn, "cpu-release-addr",
+				   &cpu_release_addr[cpu]);
+	if (ret)
 		pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
 		       cpu);
 
-		return -1;
-	}
+	of_node_put(dn);
 
-	return 0;
+	return ret;
 }
 
 static int smp_spin_table_cpu_prepare(unsigned int cpu)
@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
 					0x00000040
 
+/* Reserved - do not use		0x00000004 */
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001
 
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
+	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
 	unsigned char	pabit;		/* bit number (big-endian) */
 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-	{0, 0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
-	{0, 0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
-	{CPU_FTR_CTRL, 0, 0,		0, 3, 0},
-	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
-	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
-	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
+	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
+	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
+	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
+	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,	1, 2, 0},
+	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
 	/*
-	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
-	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
-	 * which is 0 if the kernel doesn't support TM.
+	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+	 * we don't want to turn on TM here, so we use the *_COMP versions
+	 * which are 0 if the kernel doesn't support TM.
 	 */
-	{CPU_FTR_TM_COMP, 0, 0,		22, 0, 0},
+	{CPU_FTR_TM_COMP, 0, 0,
+	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
 		if (bit ^ fp->invert) {
 			cur_cpu_spec->cpu_features |= fp->cpu_features;
 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features |= fp->mmu_features;
 		} else {
 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
 		}
 	}
@@ -4,6 +4,9 @@ config MMU
 config ZONE_DMA
 	def_bool y
 
+config CPU_BIG_ENDIAN
+	def_bool y
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -44,7 +44,8 @@ struct zpci_fmb {
 	u64 rpcit_ops;
 	u64 dma_rbytes;
 	u64 dma_wbytes;
-} __packed __aligned(64);
+	u64 pad[2];
+} __packed __aligned(128);
 
 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,
@@ -13,4 +13,6 @@
 #define __NR_seccomp_exit_32	__NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
+#include <asm-generic/seccomp.h>
+
 #endif	/* _ASM_S390_SECCOMP_H */
@@ -105,6 +105,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 				return;
 			local_irq_restore(flags);
+			continue;
 		}
 		/* Check if the lock owner is running. */
 		if (first_diag && cpu_is_preempted(~owner)) {
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 			req = cast_mcryptd_ctx_to_req(req_ctx);
 			if (irqs_disabled())
-				rctx->complete(&req->base, ret);
+				req_ctx->complete(&req->base, ret);
 			else {
 				local_bh_disable();
-				rctx->complete(&req->base, ret);
+				req_ctx->complete(&req->base, ret);
 				local_bh_enable();
 			}
 		}
@@ -4,6 +4,7 @@
 #include <asm/page.h>
 #include <asm-generic/hugetlb.h>
 
+#define hugepages_supported() cpu_has_pse
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 					 unsigned long addr,
@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static unsigned char hv_get_nmi_reason(void)
+{
+	return 0;
+}
+
 static void __init ms_hyperv_init_platform(void)
 {
 	/*
@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
 	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
 #endif
 	mark_tsc_unstable("running on Hyper-V");
+
+	/*
+	 * Generation 2 instances don't support reading the NMI status from
+	 * 0x61 port.
+	 */
+	if (efi_enabled(EFI_BOOT))
+		x86_platform.get_nmi_reason = hv_get_nmi_reason;
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -387,16 +387,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
 	req_ctx->child_req.src = req->src;
 	req_ctx->child_req.src_len = req->src_len;
 	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size - 1;
+	req_ctx->child_req.dst_len = ctx->key_size;
 
-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+	req_ctx->out_buf = kmalloc(ctx->key_size,
 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 			GFP_KERNEL : GFP_ATOMIC);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;
 
 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-			ctx->key_size - 1, NULL);
+			ctx->key_size, NULL);
 
 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 	req_ctx->child_req.src = req->src;
 	req_ctx->child_req.src_len = req->src_len;
 	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size - 1;
+	req_ctx->child_req.dst_len = ctx->key_size;
 
-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+	req_ctx->out_buf = kmalloc(ctx->key_size,
 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 			GFP_KERNEL : GFP_ATOMIC);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;
 
 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-			ctx->key_size - 1, NULL);
+			ctx->key_size, NULL);
 
 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
 	return false;
 }
 
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
 						     struct bcma_device *core)
 {
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
 	struct of_phandle_args out_irq;
 	int ret;
 
-	if (!parent || !parent->dev.of_node)
+	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
 		return 0;
 
 	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
 {
 	struct device_node *node;
 
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return;
+
 	node = bcma_of_find_child_device(parent, core);
 	if (node)
 		core->dev.of_node = node;
 
 	core->irq = bcma_of_get_irq(parent, core, 0);
 }
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
-				struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
-					   struct bcma_device *core, int num)
-{
-	return 0;
-}
-#endif /* CONFIG_OF */
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
 {
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
 
 	ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
 				    32, clocksource_mmio_readl_up);
-	if (!ret) {
+	if (ret) {
 		pr_err("%s: registration failed\n", np->full_name);
 		return;
 	}
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
 {
 	unsigned int new_freq;
 
+	if (cpufreq_suspended)
+		return 0;
+
 	new_freq = cpufreq_driver->get(policy->cpu);
 	if (!new_freq)
 		return 0;
@@ -1130,6 +1130,10 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
 				      int_tofp(duration_ns));
 		core_busy = mul_fp(core_busy, sample_ratio);
+	} else {
+		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+		if (sample_ratio < int_tofp(1))
+			core_busy = 0;
 	}
 
 	cpu->sample.busy_scaled = core_busy;
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_aes_cmac_exp_ctx state;
 
+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.null_msg = rctx->null_msg;
 	memcpy(state.iv, rctx->iv, sizeof(state.iv));
 	state.buf_count = rctx->buf_count;
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_sha_exp_ctx state;
 
+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.type = rctx->type;
 	state.msg_bits = rctx->msg_bits;
 	state.first = rctx->first;
@@ -362,6 +362,7 @@ struct sbridge_pvt {
 
 	/* Memory type detection */
 	bool		is_mirrored, is_lockstep, is_close_pg;
+	bool		is_chan_hash;
 
 	/* Fifo double buffers */
 	struct mce	mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
 	return (pkg >> 2) & 0x1;
 }
 
+static int haswell_chan_hash(int idx, u64 addr)
+{
+	int i;
+
+	/*
+	 * XOR even bits from 12:26 to bit0 of idx,
+	 * odd bits from 13:27 to bit1
+	 */
+	for (i = 12; i < 28; i += 2)
+		idx ^= (addr >> i) & 3;
+
+	return idx;
+}
+
 /****************************************************************************
 			Memory check routines
  ****************************************************************************/
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 			KNL_MAX_CHANNELS : NUM_CHANNELS;
 	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
 
+	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+		pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
+		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+	}
 	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
 	    pvt->info.type == KNIGHTS_LANDING)
 		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	}
 
 	ch_way = TAD_CH(reg) + 1;
-	sck_way = 1 << TAD_SOCK(reg);
+	sck_way = TAD_SOCK(reg);
 
 	if (ch_way == 3)
 		idx = addr >> 6;
-	else
+	else {
 		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
+		if (pvt->is_chan_hash)
+			idx = haswell_chan_hash(idx, addr);
+	}
 	idx = idx % ch_way;
 
 	/*
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	switch(ch_way) {
 	case 2:
 	case 4:
-		sck_xch = 1 << sck_way * (ch_way >> 1);
+		sck_xch = (1 << sck_way) * (ch_way >> 1);
 		break;
 	default:
 		sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 
 	ch_addr = addr - offset;
 	ch_addr >>= (6 + shiftup);
-	ch_addr /= ch_way * sck_way;
+	ch_addr /= sck_xch;
 	ch_addr <<= (6 + shiftup);
 	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
 
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	unsigned		fw_version;
 	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
 	struct acp_pm_domain *apd;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* return early if no ACP */
+	if (!adev->acp.acp_genpd)
+		return 0;
+
 	/* SMU block will power on ACP irrespective of ACP runtime status.
 	 * Power off explicitly based on genpd ACP runtime status so that ACP
 	 * hw and ACP-genpd status are in sync.
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		fw_info.feature = adev->vce.fb_version;
 		break;
 	case AMDGPU_INFO_FW_UVD:
-		fw_info.ver = 0;
+		fw_info.ver = adev->uvd.fw_version;
 		fw_info.feature = 0;
 		break;
 	case AMDGPU_INFO_FW_GMC:
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
 
 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
 
 enum amdgpu_rmx_type {
 	RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
 	struct atom_context *atom_context;
 	struct card_info *atom_card_info;
 	bool mode_config_initialized;
-	struct amdgpu_crtc *crtcs[6];
-	struct amdgpu_afmt *afmt[7];
+	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
 	/* DAC enable load detect */
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
 
+	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+		return -EPERM;
 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
 		version_major, version_minor, family_id);
 
+	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+				(family_id << 8));
+
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -255,6 +258,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	if (i == AMDGPU_MAX_UVD_HANDLES)
 		return 0;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
 	ptr = adev->uvd.cpu_addr;
 
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 	if (i == AMDGPU_MAX_VCE_HANDLES)
 		return 0;
 
+	cancel_delayed_work_sync(&adev->vce.idle_work);
 	/* TODO: suspending running encoding sessions isn't supported */
 	return -EINVAL;
 }
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
 	int i;
 
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
 	port_num = port->port_num;
 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
 	if (!mstb) {
 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
-		if (!mstb)
+		if (!mstb) {
+			drm_dp_put_port(port);
 			return -EINVAL;
+		}
 	}
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	kfree(txmsg);
 fail_put:
 	drm_dp_put_mst_branch_device(mstb);
+	drm_dp_put_port(port);
 	return ret;
 }
 
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-						 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-						  IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+						 IS_SKL_GT3(dev) || \
+						 IS_SKL_GT4(dev))
 
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
-		down_read(&mm->mmap_sem);
-		while (pinned < npages) {
-			ret = get_user_pages_remote(work->task, mm,
-					obj->userptr.ptr + pinned * PAGE_SIZE,
-					npages - pinned,
-					!obj->userptr.read_only, 0,
-					pvec + pinned, NULL);
-			if (ret < 0)
-				break;
+		ret = -EFAULT;
+		if (atomic_inc_not_zero(&mm->mm_users)) {
+			down_read(&mm->mmap_sem);
+			while (pinned < npages) {
+				ret = get_user_pages_remote
+					(work->task, mm,
+					 obj->userptr.ptr + pinned * PAGE_SIZE,
+					 npages - pinned,
+					 !obj->userptr.read_only, 0,
+					 pvec + pinned, NULL);
+				if (ret < 0)
+					break;
 
-			pinned += ret;
+				pinned += ret;
+			}
+			up_read(&mm->mmap_sem);
+			mmput(mm);
 		}
-		up_read(&mm->mmap_sem);
 	}
 
 	mutex_lock(&dev->struct_mutex);
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
-		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	int ret;
 
-	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+	ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
 	if (ret)
 		return ret;
 
+	/* We're using qword write, seqno should be aligned to 8 bytes. */
+	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
 	/* w/a for post sync ops following a GPGPU operation we
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
 	intel_logical_ring_emit(ringbuf,
 				(PIPE_CONTROL_GLOBAL_GTT_IVB |
 				 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	/* We're thrashing one dword of HWS. */
+	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
 
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 			     const struct drm_plane_state *pstate,
 			     int y)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
 	struct drm_framebuffer *fb = pstate->fb;
+	uint32_t width = 0, height = 0;
+
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(pstate->rotation))
+		swap(width, height);
 
 	/* for planar format */
 	if (fb->pixel_format == DRM_FORMAT_NV12) {
 		if (y)  /* y-plane data rate */
-			return intel_crtc->config->pipe_src_w *
-				intel_crtc->config->pipe_src_h *
+			return width * height *
 				drm_format_plane_cpp(fb->pixel_format, 0);
 		else    /* uv-plane data rate */
-			return (intel_crtc->config->pipe_src_w/2) *
-				(intel_crtc->config->pipe_src_h/2) *
+			return (width / 2) * (height / 2) *
 				drm_format_plane_cpp(fb->pixel_format, 1);
 	}
 
 	/* for packed formats */
-	return intel_crtc->config->pipe_src_w *
-		intel_crtc->config->pipe_src_h *
-		drm_format_plane_cpp(fb->pixel_format, 0);
+	return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
 
 /*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		struct drm_framebuffer *fb = plane->state->fb;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (fb == NULL)
+		if (!to_intel_plane_state(plane->state)->visible)
 			continue;
+
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		uint16_t plane_blocks, y_plane_blocks = 0;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (pstate->fb == NULL)
+		if (!to_intel_plane_state(pstate)->visible)
 			continue;
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 {
 	struct drm_plane *plane = &intel_plane->base;
 	struct drm_framebuffer *fb = plane->state->fb;
+	struct intel_plane_state *intel_pstate =
+					to_intel_plane_state(plane->state);
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint32_t method1, method2;
 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t res_blocks, res_lines;
 	uint32_t selected_result;
 	uint8_t cpp;
+	uint32_t width = 0, height = 0;
 
-	if (latency == 0 || !cstate->base.active || !fb)
+	if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
 		return false;
 
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(plane->state->rotation))
+		swap(width, height);
+
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
 				 cpp, latency);
 	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 cstate->pipe_src_w,
-				 cpp, fb->modifier[0],
+				 width,
+				 cpp,
+				 fb->modifier[0],
 				 latency);
 
-	plane_bytes_per_line = cstate->pipe_src_w * cpp;
+	plane_bytes_per_line = width * cpp;
 	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+	/* This is tied to WaForceContextSaveRestoreNonCoherent */
+	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
+	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
 		if (ret)
 			return ret;
 
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 			return -ENOMEM;
 		}
 	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+					    flags | PIN_MAPPABLE);
 		if (ret)
 			return ret;
 
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
-		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		if (IS_HASWELL(dev))
+			dev_priv->uncore.funcs.force_wake_put =
+				fw_domains_put_with_fifo;
+		else
+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
 	} else if (IS_IVYBRIDGE(dev)) {
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		break;
 	default:
 		if (disp->dithering_mode) {
+			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
 			drm_object_attach_property(&connector->base,
 						   disp->dithering_mode,
 						   nv_connector->
 						   dithering_mode);
-			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
 		}
 		if (disp->dithering_depth) {
+			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
 			drm_object_attach_property(&connector->base,
 						   disp->dithering_depth,
 						   nv_connector->
 						   dithering_depth);
-			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
 		}
 		break;
 	}
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
+	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
 	return radeon_atpx_priv.atpx_detected;
 }
 
-bool radeon_has_atpx_dgpu_power_cntl(void) {
-	return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	if (atpx->functions.power_cntl == false) {
+		printk("ATPX dGPU power cntl not present, forcing\n");
+		atpx->functions.power_cntl = true;
+	}
+
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 							   rdev->mode_info.dither_property,
 							   RADEON_FMT_DITHER_DISABLE);
 
-			if (radeon_audio != 0)
+			if (radeon_audio != 0) {
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
+			}
 			if (ASIC_IS_DCE5(rdev))
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.output_csc_property,
@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
|
||||||
drm_object_attach_property(&radeon_connector->base.base,
|
drm_object_attach_property(&radeon_connector->base.base,
|
||||||
rdev->mode_info.audio_property,
|
rdev->mode_info.audio_property,
|
||||||
RADEON_AUDIO_AUTO);
|
RADEON_AUDIO_AUTO);
|
||||||
|
radeon_connector->audio = RADEON_AUDIO_AUTO;
|
||||||
}
|
}
|
||||||
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
|
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
|
||||||
radeon_connector->dac_load_detect = true;
|
radeon_connector->dac_load_detect = true;
|
||||||
|
@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
|
||||||
drm_object_attach_property(&radeon_connector->base.base,
|
drm_object_attach_property(&radeon_connector->base.base,
|
||||||
rdev->mode_info.audio_property,
|
rdev->mode_info.audio_property,
|
||||||
RADEON_AUDIO_AUTO);
|
RADEON_AUDIO_AUTO);
|
||||||
|
radeon_connector->audio = RADEON_AUDIO_AUTO;
|
||||||
}
|
}
|
||||||
if (ASIC_IS_DCE5(rdev))
|
if (ASIC_IS_DCE5(rdev))
|
||||||
drm_object_attach_property(&radeon_connector->base.base,
|
drm_object_attach_property(&radeon_connector->base.base,
|
||||||
|
@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
|
||||||
drm_object_attach_property(&radeon_connector->base.base,
|
drm_object_attach_property(&radeon_connector->base.base,
|
||||||
rdev->mode_info.audio_property,
|
rdev->mode_info.audio_property,
|
||||||
RADEON_AUDIO_AUTO);
|
RADEON_AUDIO_AUTO);
|
||||||
|
radeon_connector->audio = RADEON_AUDIO_AUTO;
|
||||||
}
|
}
|
||||||
if (ASIC_IS_DCE5(rdev))
|
if (ASIC_IS_DCE5(rdev))
|
||||||
drm_object_attach_property(&radeon_connector->base.base,
|
drm_object_attach_property(&radeon_connector->base.base,
|
||||||
|
|
|
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
 	"LAST",
 };

-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)

@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
 	}
 	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);

-	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
 		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
-		 pdev->subsystem_vendor, pdev->subsystem_device);
+		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

-	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+	if (rdev->flags & RADEON_IS_PX)
 		runtime = true;
 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
 	if (runtime)
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

+	if (radeon_ttm_tt_has_userptr(bo->ttm))
+		return -EPERM;
 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }

@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };

@@ -153,6 +153,7 @@ static const struct xpad_device {
 	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+	{ 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
 	{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
 	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
 	XPAD_XBOX360_VENDOR(0x046d),	/* Logitech X-Box 360 style controllers */
 	XPAD_XBOX360_VENDOR(0x0738),	/* Mad Catz X-Box 360 controllers */
 	{ USB_DEVICE(0x0738, 0x4540) },	/* Mad Catz Beat Pad */
+	XPAD_XBOXONE_VENDOR(0x0738),	/* Mad Catz FightStick TE 2 */
 	XPAD_XBOX360_VENDOR(0x0e6f),	/* 0x0e6f X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x12ab),	/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),	/* RedOctane X-Box 360 controllers */
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
 	input_set_drvdata(haptics->input_dev, haptics);

 	haptics->input_dev->name = "arizona:haptics";
-	haptics->input_dev->dev.parent = pdev->dev.parent;
 	haptics->input_dev->close = arizona_haptics_close;
 	__set_bit(FF_RUMBLE, haptics->input_dev->ffbit);

@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
 	if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
 		kpd_delay = 15625;

-	if (kpd_delay > 62500 || kpd_delay == 0) {
+	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+	if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
 		dev_err(&pdev->dev, "invalid power key trigger delay\n");
 		return -EINVAL;
 	}
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
 	pwr->name = "pmic8xxx_pwrkey";
 	pwr->phys = "pmic8xxx_pwrkey/input0";

-	delay = (kpd_delay << 10) / USEC_PER_SEC;
-	delay = 1 + ilog2(delay);
+	delay = (kpd_delay << 6) / USEC_PER_SEC;
+	delay = ilog2(delay);

 	err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
 	if (err < 0) {
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)

 	info->input_dev->name = "twl4030:vibrator";
 	info->input_dev->id.version = 1;
-	info->input_dev->dev.parent = pdev->dev.parent;
 	info->input_dev->close = twl4030_vibra_close;
 	__set_bit(FF_RUMBLE, info->input_dev->ffbit);

@@ -45,7 +45,6 @@
 struct vibra_info {
 	struct device *dev;
 	struct input_dev *input_dev;
-	struct workqueue_struct *workqueue;
 	struct work_struct play_work;
 	struct mutex mutex;
 	int irq;
@@ -213,11 +212,7 @@ static int vibra_play(struct input_dev *input, void *data,
 	info->strong_speed = effect->u.rumble.strong_magnitude;
 	info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;

-	ret = queue_work(info->workqueue, &info->play_work);
-	if (!ret) {
-		dev_info(&input->dev, "work is already on queue\n");
-		return ret;
-	}
+	schedule_work(&info->play_work);

 	return 0;
 }
@@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)

 	info->input_dev->name = "twl6040:vibrator";
 	info->input_dev->id.version = 1;
-	info->input_dev->dev.parent = pdev->dev.parent;
 	info->input_dev->close = twl6040_vibra_close;
 	__set_bit(FF_RUMBLE, info->input_dev->ffbit);

@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
 		goto err_free_buf;
 	}

+	/* Sanity check that a device has an endpoint */
+	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+		dev_err(&usbinterface->dev,
+			"Invalid number of endpoints\n");
+		error = -EINVAL;
+		goto err_free_urb;
+	}
+
 	/*
 	 * The endpoint is always altsetting 0, we know this since we know
 	 * this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
 	 * HID report descriptor
 	 */
 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
-				     HID_DEVICE_TYPE, &hid_desc) != 0){
+				     HID_DEVICE_TYPE, &hid_desc) != 0) {
 		dev_err(&usbinterface->dev,
 			"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
 		error = -EIO;
@@ -92,6 +92,7 @@ struct iommu_dev_data {
 	struct list_head dev_data_list;	  /* For global dev_data_list */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	u16 devid;			  /* PCI Device ID */
+	u16 alias;			  /* Alias Device ID */
 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
 	bool passthrough;		  /* Device is identity mapped */
 	struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 	return container_of(dom, struct protection_domain, domain);
 }

+static inline u16 get_device_id(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
 	return dev_data;
 }

+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	*(u16 *)data = alias;
+	return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	u16 devid, ivrs_alias, pci_alias;
+
+	devid = get_device_id(dev);
+	ivrs_alias = amd_iommu_alias_table[devid];
+	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+	if (ivrs_alias == pci_alias)
+		return ivrs_alias;
+
+	/*
+	 * DMA alias showdown
+	 *
+	 * The IVRS is fairly reliable in telling us about aliases, but it
+	 * can't know about every screwy device. If we don't have an IVRS
+	 * reported alias, use the PCI reported alias. In that case we may
+	 * still need to initialize the rlookup and dev_table entries if the
+	 * alias is to a non-existent device.
+	 */
+	if (ivrs_alias == devid) {
+		if (!amd_iommu_rlookup_table[pci_alias]) {
+			amd_iommu_rlookup_table[pci_alias] =
+				amd_iommu_rlookup_table[devid];
+			memcpy(amd_iommu_dev_table[pci_alias].data,
+			       amd_iommu_dev_table[devid].data,
+			       sizeof(amd_iommu_dev_table[pci_alias].data));
+		}
+
+		return pci_alias;
+	}
+
+	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+		"for device %s[%04x:%04x], kernel reported alias "
+		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+		PCI_FUNC(pci_alias));
+
+	/*
+	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
+	 * bus, then the IVRS table may know about a quirk that we don't.
+	 */
+	if (pci_alias == devid &&
+	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+		pdev->dma_alias_devfn = ivrs_alias & 0xff;
+		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+			dev_name(dev));
+	}
+
+	return ivrs_alias;
+}
+
 static struct iommu_dev_data *find_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
 	return dev_data;
 }

-static inline u16 get_device_id(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-
-	return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
 static struct iommu_dev_data *get_dev_data(struct device *dev)
 {
 	return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
 	if (!dev_data)
 		return -ENOMEM;

+	dev_data->alias = get_alias(dev);
+
 	if (pci_iommuv2_capable(pdev)) {
 		struct amd_iommu *iommu;

@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
 	u16 devid, alias;

 	devid = get_device_id(dev);
-	alias = amd_iommu_alias_table[devid];
+	alias = get_alias(dev);

 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
 	int ret;

 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;

 	ret = iommu_flush_dte(iommu, dev_data->devid);
 	if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
 	bool ats;

 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 	ats   = dev_data->ats.enabled;

 	/* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
 		return;

 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;

 	/* decrease reference counters */
 	dev_data->domain->dev_iommu[iommu->index] -= 1;
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->smmu)
 		goto out_unlock;

+	/* We're bypassing these SIDs, so don't allocate an actual context */
+	if (domain->type == IOMMU_DOMAIN_DMA) {
+		smmu_domain->smmu = smmu;
+		goto out_unlock;
+	}
+
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	void __iomem *cb_base;
 	int irq;

-	if (!smmu)
+	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
 		return;

 	/*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

+	/*
+	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
+	 * for all devices behind the SMMU. Note that we need to take
+	 * care configuring SMRs for devices both a platform_device and
+	 * and a PCI device (i.e. a PCI host controller)
+	 */
+	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
+		return 0;
+
 	/* Devices in an IOMMU group may already be configured */
 	ret = arm_smmu_master_configure_smrs(smmu, cfg);
 	if (ret)
 		return ret == -EEXIST ? 0 : ret;

-	/*
-	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-	 * for all devices behind the SMMU.
-	 */
-	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
-		return 0;
-
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		u32 idx, s2cr;

@@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
 	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

 	/* Update the pcpu_masks */
-	for (i = 0; i < gic_vpes; i++)
+	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
 		clear_bit(irq, pcpu_masks[i].pcpu_mask);
 	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

@@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	spin_lock_irqsave(&gic_lock, flags);
 	gic_map_to_pin(intr, gic_cpu_pin);
 	gic_map_to_vpe(intr, vpe);
-	for (i = 0; i < gic_vpes; i++)
+	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
 		clear_bit(intr, pcpu_masks[i].pcpu_mask);
 	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
 	spin_unlock_irqrestore(&gic_lock, flags);
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	if (!maddr || maddr->family != AF_ISDN)
 		return -EINVAL;

+	if (addr_len < sizeof(struct sockaddr_mISDN))
+		return -EINVAL;
+
 	lock_sock(sk);

 	if (_pms(sk)->dev) {
@@ -195,6 +195,7 @@ config GENEVE

 config MACSEC
 	tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+	select CRYPTO
 	select CRYPTO_AES
 	select CRYPTO_GCM
 	---help---
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 			       struct net_device *bridge)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u16 fid;
 	int i, err;

 	mutex_lock(&ps->smi_mutex);

-	/* Get or create the bridge FID and assign it to the port */
-	for (i = 0; i < ps->num_ports; ++i)
-		if (ps->ports[i].bridge_dev == bridge)
-			break;
-
-	if (i < ps->num_ports)
-		err = _mv88e6xxx_port_fid_get(ds, i, &fid);
-	else
-		err = _mv88e6xxx_fid_new(ds, &fid);
-	if (err)
-		goto unlock;
-
-	err = _mv88e6xxx_port_fid_set(ds, port, fid);
-	if (err)
-		goto unlock;
-
 	/* Assign the bridge and remap each port's VLANTable */
 	ps->ports[port].bridge_dev = bridge;

@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
 		}
 	}

-unlock:
 	mutex_unlock(&ps->smi_mutex);

 	return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct net_device *bridge = ps->ports[port].bridge_dev;
-	u16 fid;
 	int i;

 	mutex_lock(&ps->smi_mutex);

-	/* Give the port a fresh Filtering Information Database */
-	if (_mv88e6xxx_fid_new(ds, &fid) ||
-	    _mv88e6xxx_port_fid_set(ds, port, fid))
-		netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
 	/* Unassign the bridge and remap each port's VLANTable */
 	ps->ports[port].bridge_dev = NULL;

@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 	 * the other bits clear.
 	 */
 	reg = 1 << port;
-	/* Disable learning for DSA and CPU ports */
-	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
-		reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+	/* Disable learning for CPU port */
+	if (dsa_is_cpu_port(ds, port))
+		reg = 0;

 	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
 	if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 	if (ret)
 		goto abort;

-	/* Port based VLAN map: give each port its own address
+	/* Port based VLAN map: give each port the same default address
 	 * database, and allow bidirectional communication between the
 	 * CPU and DSA port(s), and the other ports.
 	 */
-	ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+	ret = _mv88e6xxx_port_fid_set(ds, port, 0);
 	if (ret)
 		goto abort;

@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

 	err = -EIO;

-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);

 	/* Init PHY as early as possible due to power saving issue */
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
 		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
 	}

+	/* This (reset &) enable is not preset in specs or reference driver but
+	 * Broadcom does it in arch PCI code when enabling fake PCI device.
+	 */
+	bcma_core_enable(core, 0);
+
 	/* Allocation and references */
 	net_dev = alloc_etherdev(sizeof(*bgmac));
 	if (!net_dev)
|
||||||
#define BGMAC_CMDCFG_TAI 0x00000200
|
#define BGMAC_CMDCFG_TAI 0x00000200
|
||||||
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
|
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
|
||||||
#define BGMAC_CMDCFG_HD_SHIFT 10
|
#define BGMAC_CMDCFG_HD_SHIFT 10
|
||||||
#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
|
#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
|
||||||
#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
|
#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
|
||||||
#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
|
#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
|
||||||
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
|
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
|
||||||
#define BGMAC_CMDCFG_AE 0x00400000
|
#define BGMAC_CMDCFG_AE 0x00400000
|
||||||
#define BGMAC_CMDCFG_CFE 0x00800000
|
#define BGMAC_CMDCFG_CFE 0x00800000
|
||||||
|
|
|
@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
|
||||||
else
|
else
|
||||||
p = (char *)priv;
|
p = (char *)priv;
|
||||||
p += s->stat_offset;
|
p += s->stat_offset;
|
||||||
data[i] = *(u32 *)p;
|
if (sizeof(unsigned long) != sizeof(u32) &&
|
||||||
|
s->stat_sizeof == sizeof(unsigned long))
|
||||||
|
data[i] = *(unsigned long *)p;
|
||||||
|
else
|
||||||
|
data[i] = *(u32 *)p;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
|
||||||
}
|
}
|
||||||
|
|
||||||
lmac++;
|
lmac++;
|
||||||
if (lmac == MAX_LMAC_PER_BGX)
|
if (lmac == MAX_LMAC_PER_BGX) {
|
||||||
|
of_node_put(node);
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
of_node_put(node);
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
defer:
|
defer:
|
||||||
|
|
|
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
 	       unsigned int fl0id, unsigned int fl1id);
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
 void t4_free_sge_resources(struct adapter *adap)
 {
 	int i;
-	struct sge_eth_rxq *eq = adap->sge.ethrxq;
-	struct sge_eth_txq *etq = adap->sge.ethtxq;
+	struct sge_eth_rxq *eq;
+	struct sge_eth_txq *etq;

+	/* stop all Rx queues in order to start them draining */
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
+		if (eq->rspq.desc)
+			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+				   FW_IQ_TYPE_FL_INT_CAP,
+				   eq->rspq.cntxt_id,
+				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+				   0xffff);
+	}
+
 	/* clean up Ethernet Tx/Rx queues */
-	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
 		if (eq->rspq.desc)
 			free_rspq_fl(adap, &eq->rspq,
 				     eq->fl.size ? &eq->fl : NULL);

+		etq = &adap->sge.ethtxq[i];
 		if (etq->q.desc) {
 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
 				       etq->q.cntxt_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define EEPROM_STAT_ADDR 0x7bfc
|
#define EEPROM_STAT_ADDR 0x7bfc
|
||||||
|
#define VPD_SIZE 0x800
|
||||||
#define VPD_BASE 0x400
|
#define VPD_BASE 0x400
|
||||||
#define VPD_BASE_OLD 0
|
#define VPD_BASE_OLD 0
|
||||||
#define VPD_LEN 1024
|
#define VPD_LEN 1024
|
||||||
|
@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
|
||||||
if (!vpd)
|
if (!vpd)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
/* We have two VPD data structures stored in the adapter VPD area.
|
||||||
|
* By default, Linux calculates the size of the VPD area by traversing
|
||||||
|
* the first VPD area at offset 0x0, so we need to tell the OS what
|
||||||
|
* our real VPD size is.
|
||||||
|
*/
|
||||||
|
ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
|
||||||
|
if (ret < 0)
|
||||||
|
goto out;
|
||||||
|
|
||||||
/* Card information normally starts at VPD_BASE but early cards had
|
/* Card information normally starts at VPD_BASE but early cards had
|
||||||
* it at 0.
|
* it at 0.
|
||||||
*/
|
*/
|
||||||
|
@ -6939,6 +6949,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
|
||||||
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
|
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* t4_iq_stop - stop an ingress queue and its FLs
|
||||||
|
* @adap: the adapter
|
||||||
|
* @mbox: mailbox to use for the FW command
|
||||||
|
* @pf: the PF owning the queues
|
||||||
|
* @vf: the VF owning the queues
|
||||||
|
* @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
|
||||||
|
* @iqid: ingress queue id
|
||||||
|
* @fl0id: FL0 queue id or 0xffff if no attached FL0
|
||||||
|
* @fl1id: FL1 queue id or 0xffff if no attached FL1
|
||||||
|
*
|
||||||
|
* Stops an ingress queue and its associated FLs, if any. This causes
|
||||||
|
* any current or future data/messages destined for these queues to be
|
||||||
|
* tossed.
|
||||||
|
*/
|
||||||
|
int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
|
||||||
|
unsigned int vf, unsigned int iqtype, unsigned int iqid,
|
||||||
|
unsigned int fl0id, unsigned int fl1id)
|
||||||
|
{
|
||||||
|
struct fw_iq_cmd c;
|
||||||
|
|
||||||
|
memset(&c, 0, sizeof(c));
|
||||||
|
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
|
||||||
|
FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
|
||||||
|
FW_IQ_CMD_VFN_V(vf));
|
||||||
|
c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
|
||||||
|
c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
|
||||||
|
c.iqid = cpu_to_be16(iqid);
|
||||||
|
c.fl0id = cpu_to_be16(fl0id);
|
||||||
|
c.fl1id = cpu_to_be16(fl1id);
|
||||||
|
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* t4_iq_free - free an ingress queue and its FLs
|
* t4_iq_free - free an ingress queue and its FLs
|
||||||
* @adap: the adapter
|
* @adap: the adapter
|
||||||
|
|
|
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
 		if (err)
 			return err;

-		/* verify upper 16 bits are zero */
-		if (vid >> 16)
-			return FM10K_ERR_PARAM;
-
 		set = !(vid & FM10K_VLAN_CLEAR);
 		vid &= ~FM10K_VLAN_CLEAR;

-		err = fm10k_iov_select_vid(vf_info, (u16)vid);
-		if (err < 0)
-			return err;
-
-		vid = err;
+		/* if the length field has been set, this is a multi-bit
+		 * update request. For multi-bit requests, simply disallow
+		 * them when the pf_vid has been set. In this case, the PF
+		 * should have already cleared the VLAN_TABLE, and if we
+		 * allowed them, it could allow a rogue VF to receive traffic
+		 * on a VLAN it was not assigned. In the single-bit case, we
+		 * need to modify requests for VLAN 0 to use the default PF or
+		 * SW vid when assigned.
+		 */
+		if (vid >> 16) {
+			/* prevent multi-bit requests when PF has
+			 * administratively set the VLAN for this VF
+			 */
+			if (vf_info->pf_vid)
+				return FM10K_ERR_PARAM;
+		} else {
+			err = fm10k_iov_select_vid(vf_info, (u16)vid);
+			if (err < 0)
+				return err;
+
+			vid = err;
+		}

 		/* update VSI info for VF in regards to VLAN table */
 		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __i40e_chk_linearize - Check if there are more than 8 fragments per packet
|
* __i40e_chk_linearize - Check if there are more than 8 buffers per packet
|
||||||
* @skb: send buffer
|
* @skb: send buffer
|
||||||
*
|
*
|
||||||
* Note: Our HW can't scatter-gather more than 8 fragments to build
|
* Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
|
||||||
* a packet on the wire and so we need to figure out the cases where we
|
* and so we need to figure out the cases where we need to linearize the skb.
|
||||||
* need to linearize the skb.
|
*
|
||||||
|
* For TSO we need to count the TSO header and segment payload separately.
|
||||||
|
* As such we need to check cases where we have 7 fragments or more as we
|
||||||
|
* can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
|
||||||
|
* the segment payload in the first descriptor, and another 7 for the
|
||||||
|
* fragments.
|
||||||
**/
|
**/
|
||||||
bool __i40e_chk_linearize(struct sk_buff *skb)
|
bool __i40e_chk_linearize(struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
const struct skb_frag_struct *frag, *stale;
|
const struct skb_frag_struct *frag, *stale;
|
||||||
int gso_size, nr_frags, sum;
|
int nr_frags, sum;
|
||||||
|
|
||||||
/* check to see if TSO is enabled, if so we may get a repreive */
|
/* no need to check if number of frags is less than 7 */
|
||||||
gso_size = skb_shinfo(skb)->gso_size;
|
|
||||||
if (unlikely(!gso_size))
|
|
||||||
return true;
|
|
||||||
|
|
||||||
/* no need to check if number of frags is less than 8 */
|
|
||||||
nr_frags = skb_shinfo(skb)->nr_frags;
|
nr_frags = skb_shinfo(skb)->nr_frags;
|
||||||
if (nr_frags < I40E_MAX_BUFFER_TXD)
|
if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
/* We need to walk through the list and validate that each group
|
/* We need to walk through the list and validate that each group
|
||||||
* of 6 fragments totals at least gso_size. However we don't need
|
* of 6 fragments totals at least gso_size. However we don't need
|
||||||
* to perform such validation on the first or last 6 since the first
|
* to perform such validation on the last 6 since the last 6 cannot
|
||||||
* 6 cannot inherit any data from a descriptor before them, and the
|
* inherit any data from a descriptor after them.
|
||||||
* last 6 cannot inherit any data from a descriptor after them.
|
|
||||||
*/
|
*/
|
||||||
nr_frags -= I40E_MAX_BUFFER_TXD - 1;
|
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
|
||||||
frag = &skb_shinfo(skb)->frags[0];
|
frag = &skb_shinfo(skb)->frags[0];
|
||||||
|
|
||||||
/* Initialize size to the negative value of gso_size minus 1. We
|
/* Initialize size to the negative value of gso_size minus 1. We
|
||||||
|
@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
|
||||||
* descriptors for a single transmit as the header and previous
|
* descriptors for a single transmit as the header and previous
|
||||||
* fragment are already consuming 2 descriptors.
|
* fragment are already consuming 2 descriptors.
|
||||||
*/
|
*/
|
||||||
sum = 1 - gso_size;
|
sum = 1 - skb_shinfo(skb)->gso_size;
|
||||||
|
|
||||||
/* Add size of frags 1 through 5 to create our initial sum */
|
/* Add size of frags 0 through 4 to create our initial sum */
|
||||||
sum += skb_frag_size(++frag);
|
sum += skb_frag_size(frag++);
|
||||||
sum += skb_frag_size(++frag);
|
sum += skb_frag_size(frag++);
|
||||||
sum += skb_frag_size(++frag);
|
sum += skb_frag_size(frag++);
|
||||||
sum += skb_frag_size(++frag);
|
sum += skb_frag_size(frag++);
|
||||||
sum += skb_frag_size(++frag);
|
sum += skb_frag_size(frag++);
|
||||||
|
|
||||||
/* Walk through fragments adding latest fragment, testing it, and
|
/* Walk through fragments adding latest fragment, testing it, and
|
||||||
* then removing stale fragments from the sum.
|
* then removing stale fragments from the sum.
|
||||||
*/
|
*/
|
||||||
stale = &skb_shinfo(skb)->frags[0];
|
stale = &skb_shinfo(skb)->frags[0];
|
||||||
for (;;) {
|
for (;;) {
|
||||||
sum += skb_frag_size(++frag);
|
sum += skb_frag_size(frag++);
|
||||||
|
|
||||||
/* if sum is negative we failed to make sufficient progress */
|
/* if sum is negative we failed to make sufficient progress */
|
||||||
if (sum < 0)
|
if (sum < 0)
|
||||||
|
@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
|
||||||
if (!--nr_frags)
|
if (!--nr_frags)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
sum -= skb_frag_size(++stale);
|
sum -= skb_frag_size(stale++);
|
||||||
}
|
}
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
|
|
|
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
 		return false;

-	return __i40e_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40e_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 }

 /**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb: send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
 **/
 bool __i40evf_chk_linearize(struct sk_buff *skb)
 {
 	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
+	int nr_frags, sum;

-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
-
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
 		return false;

 	/* We need to walk through the list and validate that each group
 	 * of 6 fragments totals at least gso_size. However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
 	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];

 	/* Initialize size to the negative value of gso_size minus 1. We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
 	 * descriptors for a single transmit as the header and previous
 	 * fragment are already consuming 2 descriptors.
 	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;

-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);

 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
 	stale = &skb_shinfo(skb)->frags[0];
 	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);

 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
 		if (!--nr_frags)
 			break;

-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
 	}

 	return false;
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
 		return false;

-	return __i40evf_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40evf_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
 	case ETH_SS_STATS:
 		return bitmap_iterator_count(&it) +
 			(priv->tx_ring_num * 2) +
-			(priv->rx_ring_num * 2);
+			(priv->rx_ring_num * 3);
 	case ETH_SS_TEST:
 		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		data[index++] = priv->rx_ring[i]->packets;
 		data[index++] = priv->rx_ring[i]->bytes;
+		data[index++] = priv->rx_ring[i]->dropped;
 	}
 	spin_unlock_bh(&priv->stats_lock);

@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
 				"rx%d_packets", i);
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
 				"rx%d_bytes", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_dropped", i);
 		}
 		break;
 	case ETH_SS_PRIV_FLAGS:
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	u64 in_mod = reset << 8 | port;
 	int err;
 	int i, counter_index;
+	unsigned long sw_rx_dropped = 0;

 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		stats->rx_packets += priv->rx_ring[i]->packets;
 		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		sw_rx_dropped += priv->rx_ring[i]->dropped;
 		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
 		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
 		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 					  &mlx4_en_stats->MCAST_prio_1,
 					  NUM_PRIORITIES);
 	stats->collisions = 0;
-	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+			    sw_rx_dropped;
 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
 	stats->rx_over_errors = 0;
 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 		gfp_t gfp = _gfp;

 		if (order)
-			gfp |= __GFP_COMP | __GFP_NOWARN;
+			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
 		page = alloc_pages(gfp, order);
 		if (likely(page))
 			break;
@@ -126,7 +126,9 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 			dma_unmap_page(priv->ddev, page_alloc[i].dma,
 				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
 			page = page_alloc[i].page;
-			set_page_count(page, 1);
+			/* Revert changes done by mlx4_alloc_pages */
+			page_ref_sub(page, page_alloc[i].page_size /
+					   priv->frag_info[i].frag_stride - 1);
 			put_page(page);
 		}
 	}
@@ -176,7 +178,9 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 		dma_unmap_page(priv->ddev, page_alloc->dma,
 			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
 		page = page_alloc->page;
-		set_page_count(page, 1);
+		/* Revert changes done by mlx4_alloc_pages */
+		page_ref_sub(page, page_alloc->page_size /
+				   priv->frag_info[i].frag_stride - 1);
 		put_page(page);
 		page_alloc->page = NULL;
 	}
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/* GRO not possible, complete processing here */
 		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
 		if (!skb) {
-			priv->stats.rx_dropped++;
+			ring->dropped++;
 			goto next;
 		}

@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
 	return 0;
 }

+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+	int err = 0;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+		err = pci_enable_device(pdev);
+		if (!err)
+			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+
+	return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+		pci_disable_device(pdev);
+		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
 			 int reset_flow)
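A sketch of the idea behind the guarded enable/disable wrappers added above: a mutex plus an explicit status flag makes enable and disable idempotent, so the AER error path and the normal teardown path cannot double-disable the device. Plain pthreads and stub functions stand in for the kernel primitives; this is not the driver code.

/* Compile with: cc guard.c -o guard -lpthread */
#include <pthread.h>
#include <stdio.h>

enum pci_status { PCI_STATUS_DISABLED, PCI_STATUS_ENABLED };

static enum pci_status status = PCI_STATUS_DISABLED;
static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_enable(void)   { puts("pci_enable_device()");  return 0; }
static void do_disable(void) { puts("pci_disable_device()"); }

static int guarded_enable(void)
{
	int err = 0;

	pthread_mutex_lock(&status_lock);
	if (status == PCI_STATUS_DISABLED) {
		err = do_enable();
		if (!err)
			status = PCI_STATUS_ENABLED;
	}
	pthread_mutex_unlock(&status_lock);
	return err;
}

static void guarded_disable(void)
{
	pthread_mutex_lock(&status_lock);
	if (status == PCI_STATUS_ENABLED) {
		do_disable();
		status = PCI_STATUS_DISABLED;
	}
	pthread_mutex_unlock(&status_lock);
}

int main(void)
{
	guarded_enable();
	guarded_disable();
	guarded_disable();   /* second call is a no-op, not a double disable */
	return 0;
}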
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,

 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

-	err = pci_enable_device(pdev);
+	err = mlx4_pci_enable_device(&priv->dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		return err;
@@ -3715,7 +3743,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 	pci_release_regions(pdev);

 err_disable_pdev:
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(&priv->dev);
 	pci_set_drvdata(pdev, NULL);
 	return err;
 }
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	priv->pci_dev_data = id->driver_data;
 	mutex_init(&dev->persist->device_state_mutex);
 	mutex_init(&dev->persist->interface_state_mutex);
+	mutex_init(&dev->persist->pci_status_mutex);

 	ret = devlink_register(devlink, &pdev->dev);
 	if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	}

 	pci_release_regions(pdev);
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(dev);
 	devlink_unregister(devlink);
 	kfree(dev->persist);
 	devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 	if (state == pci_channel_io_perm_failure)
 		return PCI_ERS_RESULT_DISCONNECT;

-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(persist->dev);
 	return PCI_ERS_RESULT_NEED_RESET;
 }

@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
 	struct mlx4_dev *dev = persist->dev;
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int ret;
-	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-	int total_vfs;
+	int err;

 	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
-	ret = pci_enable_device(pdev);
-	if (ret) {
-		mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+	err = mlx4_pci_enable_device(dev);
+	if (err) {
+		mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}

 	pci_set_master(pdev);
 	pci_restore_state(pdev);
 	pci_save_state(pdev);
+	return PCI_ERS_RESULT_RECOVERED;
+}

+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+	struct mlx4_dev *dev = persist->dev;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int total_vfs;
+	int err;
+
+	mlx4_err(dev, "%s was called\n", __func__);
 	total_vfs = dev->persist->num_vfs;
 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

 	mutex_lock(&persist->interface_state_mutex);
 	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
-		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
 				    priv, 1);
-		if (ret) {
-			mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
-				 __func__, ret);
+		if (err) {
+			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+				 __func__, err);
 			goto end;
 		}

-		ret = restore_current_port_types(dev, dev->persist->
+		err = restore_current_port_types(dev, dev->persist->
 						 curr_port_type, dev->persist->
 						 curr_port_poss_type);
-		if (ret)
-			mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+		if (err)
+			mlx4_err(dev, "could not restore original port types (%d)\n", err);
 	}
 end:
 	mutex_unlock(&persist->interface_state_mutex);

-	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }

 static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 static const struct pci_error_handlers mlx4_err_handler = {
 	.error_detected = mlx4_pci_err_detected,
 	.slot_reset     = mlx4_pci_slot_reset,
+	.resume         = mlx4_pci_resume,
 };

 static struct pci_driver mlx4_driver = {

@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
 	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
 	int init_port_ref[MLX4_MAX_PORTS + 1];
 	u16 max_mtu[MLX4_MAX_PORTS + 1];
+	u8 pptx;
+	u8 pprx;
 	int disable_mcast_ref[MLX4_MAX_PORTS + 1];
 	struct mlx4_resource_tracker res_tracker;
 	struct workqueue_struct *comm_wq;

@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
 	unsigned long csum_ok;
 	unsigned long csum_none;
 	unsigned long csum_complete;
+	unsigned long dropped;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
 };

@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 		}

 		gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+		/* Slave cannot change Global Pause configuration */
+		if (slave != mlx4_master_func_num(dev) &&
+		    ((gen_context->pptx != master->pptx) ||
+		     (gen_context->pprx != master->pprx))) {
+			gen_context->pptx = master->pptx;
+			gen_context->pprx = master->pprx;
+			mlx4_warn(dev,
+				  "denying Global Pause change for slave:%d\n",
+				  slave);
+		} else {
+			master->pptx = gen_context->pptx;
+			master->pprx = gen_context->pprx;
+		}
 		break;
 	case MLX4_SET_PORT_GID_TABLE:
 		/* change to MULTIPLE entries: number of guest's gids

@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
 	return false;
 }

+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	qed_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
 	curr_cons->data = NULL;
 }

+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+				    struct qede_dev *edev, u8 count)
+{
+	struct sw_rx_data *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+		qede_reuse_page(edev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 					 struct qede_rx_queue *rxq,
 					 struct sw_rx_data *curr_cons)
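A small userspace sketch of the recycle-on-failure idea introduced above: when an allocation fails mid-packet, every buffer descriptor already consumed for that packet is handed straight back to the ring, the consumer index still advances and the producer reuses the same pages, so the ring never leaks entries. The ring, masks and field names below are stand-ins, not the qede structures.

/* Compile with: cc ring.c -o ring */
#include <stdio.h>

#define NUM_RX_BDS      8
#define NUM_RX_BDS_MAX  (NUM_RX_BDS - 1)

struct rx_queue {
	int sw_rx_cons;
	int sw_rx_prod;
	int ring[NUM_RX_BDS];   /* stand-in for the sw_rx_ring page slots */
};

static void reuse_bd(struct rx_queue *rxq)
{
	int cons = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	int prod = rxq->sw_rx_prod & NUM_RX_BDS_MAX;

	rxq->ring[prod] = rxq->ring[cons];   /* hand the page back to the producer */
	rxq->sw_rx_prod++;
}

static void recycle_rx_bd_ring(struct rx_queue *rxq, int count)
{
	for (; count > 0; count--) {
		reuse_bd(rxq);
		rxq->sw_rx_cons++;           /* consume the descriptor */
	}
}

int main(void)
{
	struct rx_queue rxq = { .sw_rx_cons = 0, .sw_rx_prod = 4,
				.ring = { 10, 11, 12, 13 } };

	recycle_rx_bd_ring(&rxq, 3);         /* e.g. a 3-BD jumbo frame that failed */
	printf("cons=%d prod=%d\n", rxq.sw_rx_cons, rxq.sw_rx_prod);
	return 0;
}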
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 	curr_cons->page_offset += rxq->rx_buf_seg_size;

 	if (curr_cons->page_offset == PAGE_SIZE) {
-		if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+			/* Since we failed to allocate new buffer
+			 * current buffer can be used again.
+			 */
+			curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
 			return -ENOMEM;
+		}

 		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
 			       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 			   len_on_bd);

 	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+		/* Incr page ref count to reuse on allocation failure
+		 * so that it doesn't get freed while freeing SKB.
+		 */
+		atomic_inc(&current_bd->data->_count);
 		goto out;
 	}

@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 	return 0;

 out:
+	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+	qede_recycle_rx_bd_ring(rxq, edev, 1);
 	return -ENOMEM;
 }

@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
 	tpa_info->skb = netdev_alloc_skb(edev->ndev,
 					 le16_to_cpu(cqe->len_on_first_bd));
 	if (unlikely(!tpa_info->skb)) {
+		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
 		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-		return;
+		goto cons_buf;
 	}

 	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
 	/* This is needed in order to enable forwarding support */
 	qede_set_gro_params(edev, tpa_info->skb, cqe);

+cons_buf: /* We still need to handle bd_len_list to consume buffers */
 	if (likely(cqe->ext_bd_len_list[0]))
 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
 				   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
 	const struct iphdr *iph = ip_hdr(skb);
 	struct tcphdr *th;

-	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, sizeof(struct iphdr));
 	th = tcp_hdr(skb);

@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct tcphdr *th;

-	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 	th = tcp_hdr(skb);

@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
 			     struct sk_buff *skb,
 			     u16 vlan_tag)
 {
+	/* FW can send a single MTU sized packet from gro flow
+	 * due to aggregation timeout/last segment etc. which
+	 * is not expected to be a gro packet. If a skb has zero
+	 * frags then simply push it in the stack as non gso skb.
+	 */
+	if (unlikely(!skb->data_len)) {
+		skb_shinfo(skb)->gso_type = 0;
+		skb_shinfo(skb)->gso_size = 0;
+		goto send_skb;
+	}
+
 #ifdef CONFIG_INET
 	if (skb_shinfo(skb)->gso_size) {
+		skb_set_network_header(skb, 0);
+
 		switch (skb->protocol) {
 		case htons(ETH_P_IP):
 			qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
 		}
 	}
 #endif

+send_skb:
 	skb_record_rx_queue(skb, fp->rss_id);
 	qede_skb_receive(edev, fp, skb, vlan_tag);
 }

@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
 				  sw_comp_cons, parse_flag);
 			rxq->rx_hw_errors++;
-			qede_reuse_page(edev, rxq, sw_rx_data);
-			goto next_rx;
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+			goto next_cqe;
 		}

 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			DP_NOTICE(edev,
 				  "Build_skb failed, dropping incoming packet\n");
-			qede_reuse_page(edev, rxq, sw_rx_data);
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
 			rxq->rx_alloc_errors++;
-			goto next_rx;
+			goto next_cqe;
 		}

 		/* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
 							    sw_rx_data))) {
 				DP_ERR(edev, "Failed to allocate rx buffer\n");
+				/* Incr page ref count to reuse on allocation
+				 * failure so that it doesn't get freed while
+				 * freeing SKB.
+				 */
+
+				atomic_inc(&sw_rx_data->data->_count);
 				rxq->rx_alloc_errors++;
+				qede_recycle_rx_bd_ring(rxq, edev,
+							fp_cqe->bd_num);
+				dev_kfree_skb_any(skb);
 				goto next_cqe;
 			}
 		}

+		qede_rx_bd_ring_consume(rxq);
+
 		if (fp_cqe->bd_num != 1) {
 			u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
 			u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			     num_frags--) {
 				u16 cur_size = pkt_len > rxq->rx_buf_size ?
 						rxq->rx_buf_size : pkt_len;
-				WARN_ONCE(!cur_size,
-					  "Still got %d BDs for mapping jumbo, but length became 0\n",
-					  num_frags);
+				if (unlikely(!cur_size)) {
+					DP_ERR(edev,
+					       "Still got %d BDs for mapping jumbo, but length became 0\n",
+					       num_frags);
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
+					goto next_cqe;
+				}

-				if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+				if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
 					goto next_cqe;
+				}

-				rxq->sw_rx_cons++;
 				sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
 				sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-				qed_chain_consume(&rxq->rx_bd_ring);
+				qede_rx_bd_ring_consume(rxq);

 				dma_unmap_page(&edev->pdev->dev,
 					       sw_rx_data->mapping,
 					       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				pkt_len -= cur_size;
 			}

-			if (pkt_len)
+			if (unlikely(pkt_len))
 				DP_ERR(edev,
 				       "Mapped all BDs of jumbo, but still have %d bytes\n",
 				       pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 		skb_record_rx_queue(skb, fp->rss_id);

 		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-		qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-		rxq->sw_rx_cons++;
 next_rx_only:
 		rx_pkt++;

@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
 		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;

-		if (replace_buf) {
+		if (replace_buf->data) {
 			dma_unmap_page(&edev->pdev->dev,
 				       dma_unmap_addr(replace_buf, mapping),
 				       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
 			      struct qede_rx_queue *rxq)
 {
-	int i, rc, size, num_allocated;
+	int i, rc, size;

 	rxq->num_rx_buffers = edev->q_num_rx_buffers;

@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
 	if (!rxq->sw_rx_ring) {
 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
+		rc = -ENOMEM;
 		goto err;
 	}

@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	/* Allocate buffers for the Rx ring */
 	for (i = 0; i < rxq->num_rx_buffers; i++) {
 		rc = qede_alloc_rx_buffer(edev, rxq);
-		if (rc)
-			break;
-	}
-	num_allocated = i;
-	if (!num_allocated) {
-		DP_ERR(edev, "Rx buffers allocation failed\n");
-		goto err;
-	} else if (num_allocated < rxq->num_rx_buffers) {
-		DP_NOTICE(edev,
-			  "Allocated less buffers than desired (%d allocated)\n",
-			  num_allocated);
+		if (rc) {
+			DP_ERR(edev,
+			       "Rx buffers allocation failed at index %d\n", i);
+			goto err;
+		}
 	}

-	qede_alloc_sge_mem(edev, rxq);
-
-	return 0;
+	rc = qede_alloc_sge_mem(edev, rxq);

 err:
-	qede_free_mem_rxq(edev, rxq);
-	return -ENOMEM;
+	return rc;
 }

 static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
 	}

 	return 0;

 err:
-	qede_free_mem_fp(edev, fp);
-	return -ENOMEM;
+	return rc;
 }

 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
 		struct qede_fastpath *fp = &edev->fp_array[rss_id];

 		rc = qede_alloc_mem_fp(edev, fp);
-		if (rc)
-			break;
-	}
-
-	if (rss_id != QEDE_RSS_CNT(edev)) {
-		/* Failed allocating memory for all the queues */
-		if (!rss_id) {
+		if (rc) {
 			DP_ERR(edev,
-			       "Failed to allocate memory for the leading queue\n");
-			rc = -ENOMEM;
-		} else {
-			DP_NOTICE(edev,
-				  "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-				  QEDE_RSS_CNT(edev), rss_id);
+			       "Failed to allocate memory for fastpath - rss id = %d\n",
+			       rss_id);
+			qede_free_mem_load(edev);
+			return rc;
 		}
-		edev->num_rss = rss_id;
 	}

 	return 0;

@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
 	rate = clk_get_rate(clk);
 	clk_put(clk);

+	if (!rate)
+		return -EINVAL;
+
 	inc = 1000000000ULL << 20;
 	do_div(inc, rate);


@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 				   __func__);
 			return ret;
 		}
-		ret = sh_eth_dev_init(ndev, false);
+		ret = sh_eth_dev_init(ndev, true);
 		if (ret < 0) {
 			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
 				   __func__);
 			return ret;
 		}

-		mdp->irq_enabled = true;
-		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-		/* Setting the Rx mode will start the Rx process. */
-		sh_eth_write(ndev, EDRRR_R, EDRRR);
 		netif_device_attach(ndev);
 	}


@@ -34,6 +34,9 @@
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010

+#define SYSMGR_FPGAGRP_MODULE_REG  0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
 #define EMAC_SPLITTER_CTRL_REG 0x0
 #define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
 #define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -148,7 +151,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
 	int phymode = dwmac->interface;
 	u32 reg_offset = dwmac->reg_offset;
 	u32 reg_shift = dwmac->reg_shift;
-	u32 ctrl, val;
+	u32 ctrl, val, module;

 	switch (phymode) {
 	case PHY_INTERFACE_MODE_RGMII:
@@ -175,12 +178,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
 	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
 	ctrl |= val << reg_shift;

-	if (dwmac->f2h_ptp_ref_clk)
+	if (dwmac->f2h_ptp_ref_clk) {
 		ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
-	else
+		regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+			    &module);
+		module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+		regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+			     module);
+	} else {
 		ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+	}

 	regmap_write(sys_mgr_base_addr, reg_offset, ctrl);

 	return 0;
 }

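A sketch of the read-modify-write pattern the hunk above adds for the FPGA-to-HPS PTP reference clock: read the module register, OR in the EMAC bit for this instance, write it back. The "regmap" below is just an array in userspace; the offsets and bit values echo the defines in the hunk but are purely illustrative.

/* Compile with: cc rmw.c -o rmw */
#include <stdint.h>
#include <stdio.h>

#define SYSMGR_FPGAGRP_MODULE_REG  0x28
#define SYSMGR_FPGAGRP_MODULE_EMAC 0x04

static uint32_t regs[0x100 / 4];     /* fake register file */

static void regmap_read_stub(uint32_t off, uint32_t *val)  { *val = regs[off / 4]; }
static void regmap_write_stub(uint32_t off, uint32_t val)  { regs[off / 4] = val; }

int main(void)
{
	uint32_t module;
	int reg_shift = 2;   /* instance-dependent shift, as in the patch */

	/* read-modify-write: never clobber bits owned by other EMAC instances */
	regmap_read_stub(SYSMGR_FPGAGRP_MODULE_REG, &module);
	module |= SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2);
	regmap_write_stub(SYSMGR_FPGAGRP_MODULE_REG, module);

	regmap_read_stub(SYSMGR_FPGAGRP_MODULE_REG, &module);
	printf("module reg = 0x%02x\n", module);
	return 0;
}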
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
 	int i, ret;
 	u32 reg;

+	pm_runtime_get_sync(&priv->pdev->dev);
+
 	if (!cpsw_common_res_usage_state(priv))
 		cpsw_intr_disable(priv);
 	netif_carrier_off(ndev);

-	pm_runtime_get_sync(&priv->pdev->dev);
-
 	reg = priv->version;

 	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
 		pdata->hw_ram_addr = auxdata->hw_ram_addr;
 	}

-	pdev->dev.platform_data = pdata;
-
 	return pdata;
 }

@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
 	cpdma_ctlr_destroy(priv->dma);

 	unregister_netdev(ndev);
+	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);

 	return 0;

@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
 		return -ENOMEM;

 	mutex_init(&ks->lock);
-	ks->spi = spi_dev_get(spi);
+	ks->spi = spi;
 	ks->chip = &ks8995_chip[variant];

 	if (ks->spi->dev.of_node) {
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info,
 	},
-	/* Huawei E3372 fails unless NDP comes after the IP packets */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	/* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+	 * (12d1:157d), are known to fail unless the NDP is placed
+	 * after the IP packets. Applying the quirk to all Huawei
+	 * devices is broader than necessary, but harmless.
+	 */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
 	},
 	/* default entry */

@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
 		union Vmxnet3_GenericDesc *gdesc)
 {
 	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
-		/* typical case: TCP/UDP over IP and both csums are correct */
-		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
-							VMXNET3_RCD_CSUM_OK) {
+		if (gdesc->rcd.v4 &&
+		    (le32_to_cpu(gdesc->dword[3]) &
+		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+			BUG_ON(gdesc->rcd.frg);
+		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
 			BUG_ON(gdesc->rcd.frg);
 		} else {
 			if (gdesc->rcd.csum) {

@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"

 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040700

 #if defined(CONFIG_PCI_MSI)
 /* RSS only makes sense if MSI-X is supported. */

@@ -60,41 +60,6 @@ struct pcpu_dstats {
 	struct u64_stats_sync syncp;
 };

-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-	return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
-	/* TO-DO: return max ethernet size? */
-	return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
-	/* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
-	return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
-	.family		= AF_INET,
-	.local_out	= vrf_ip_local_out,
-	.check		= vrf_ip_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
 /* neighbor handling is done with actual device; do not want
  * to flip skb->dev for those ndisc packets. This really fails
  * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 }

 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
-	.family		= AF_INET6,
-	.local_out	= ip6_local_out,
-	.check		= vrf_ip6_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
-	vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
-						     sizeof(struct rt6_info),
-						     0,
-						     SLAB_HWCACHE_ALIGN,
-						     NULL);
-
-	if (!vrf_dst_ops6.kmem_cachep)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-	kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
-	skb->dev->stats.rx_errors++;
-	kfree_skb(skb);
-	return 0;
-}
-
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
 			      struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }

-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
-	dst_destroy(&vrf->rt6->dst);
-	free_percpu(vrf->rt6->rt6i_pcpu);
+	dst_release(&vrf->rt6->dst);
 	vrf->rt6 = NULL;
 }

 static int vrf_rt6_create(struct net_device *dev)
 {
 	struct net_vrf *vrf = netdev_priv(dev);
-	struct dst_entry *dst;
+	struct net *net = dev_net(dev);
 	struct rt6_info *rt6;
-	int cpu;
 	int rc = -ENOMEM;

-	rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rt6 = ip6_dst_alloc(net, dev,
+			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
 	if (!rt6)
 		goto out;

-	dst = &rt6->dst;
-
-	rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
-	if (!rt6->rt6i_pcpu) {
-		dst_destroy(dst);
-		goto out;
-	}
-	for_each_possible_cpu(cpu) {
-		struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
-		*p = NULL;
-	}
-
-	memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
-	INIT_LIST_HEAD(&rt6->rt6i_siblings);
-	INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
-	rt6->dst.input	= vrf_input6;
 	rt6->dst.output	= vrf_output6;
-	rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
-	atomic_set(&rt6->dst.__refcnt, 2);
+	rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+	dst_hold(&rt6->dst);

 	vrf->rt6 = rt6;
 	rc = 0;
 out:
 	return rc;
 }
 #else
-static int init_dst_ops6_kmem_cachep(void)
-{
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
 }

@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }

-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
 {
 	struct dst_entry *dst = (struct dst_entry *)vrf->rth;

-	dst_destroy(dst);
+	dst_release(dst);
 	vrf->rth = NULL;
 }

@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct rtable *rth;

-	rth = dst_alloc(&vrf_dst_ops, dev, 2,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
 	if (rth) {
 		rth->dst.output	= vrf_output;
-		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
-		rth->rt_flags	= 0;
-		rth->rt_type	= RTN_UNICAST;
-		rth->rt_is_input = 0;
-		rth->rt_iif	= 0;
-		rth->rt_pmtu	= 0;
-		rth->rt_gateway	= 0;
-		rth->rt_uses_gateway = 0;
 		rth->rt_table_id = vrf->tb_id;
-		INIT_LIST_HEAD(&rth->rt_uncached);
-		rth->rt_uncached_list = NULL;
 	}

 	return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
 	struct net_device *port_dev;
 	struct list_head *iter;

-	vrf_rtable_destroy(vrf);
-	vrf_rt6_destroy(vrf);
+	vrf_rtable_release(vrf);
+	vrf_rt6_release(vrf);

 	netdev_for_each_lower_dev(dev, port_dev, iter)
 		vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
 	return 0;

 out_rth:
-	vrf_rtable_destroy(vrf);
+	vrf_rtable_release(vrf);
 out_stats:
 	free_percpu(dev->dstats);
 	dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
 		struct net_vrf *vrf = netdev_priv(dev);

 		rth = vrf->rth;
-		atomic_inc(&rth->dst.__refcnt);
+		dst_hold(&rth->dst);
 	}

 	return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
 		struct net_vrf *vrf = netdev_priv(dev);

 		rt = vrf->rt6;
-		atomic_inc(&rt->dst.__refcnt);
+		dst_hold(&rt->dst);
 	}

 	return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
 {
 	int rc;

-	vrf_dst_ops.kmem_cachep =
-		kmem_cache_create("vrf_ip_dst_cache",
-				  sizeof(struct rtable), 0,
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
-
-	if (!vrf_dst_ops.kmem_cachep)
-		return -ENOMEM;
-
-	rc = init_dst_ops6_kmem_cachep();
-	if (rc != 0)
-		goto error2;
-
 	register_netdevice_notifier(&vrf_notifier_block);

 	rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)

 error:
 	unregister_netdevice_notifier(&vrf_notifier_block);
-	free_dst_ops6_kmem_cachep();
-error2:
-	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
 	return rc;
 }

-static void __exit vrf_cleanup_module(void)
-{
-	rtnl_link_unregister(&vrf_link_ops);
-	unregister_netdevice_notifier(&vrf_notifier_block);
-	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
-	free_dst_ops6_kmem_cachep();
-}
-
 module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
 MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
 MODULE_LICENSE("GPL");

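A sketch of the reference-count change running through the VRF hunks above: the per-VRF dst is now freed through the generic release path (drop a reference, the last put frees it) instead of being destroyed unconditionally, so a lookup path that still holds its own reference is never left with a dangling pointer. The types and helpers below are plain C stand-ins, not the kernel dst API.

/* Compile with: cc dstref.c -o dstref */
#include <stdio.h>
#include <stdlib.h>

struct fake_dst {
	int refcnt;
};

static struct fake_dst *dst_alloc_initial(void)
{
	struct fake_dst *d = malloc(sizeof(*d));

	d->refcnt = 1;                  /* the VRF device's own reference */
	return d;
}

static void fake_dst_hold(struct fake_dst *d)    { d->refcnt++; }

static void fake_dst_release(struct fake_dst *d)
{
	if (--d->refcnt == 0) {
		puts("last reference dropped, freeing dst");
		free(d);
	}
}

int main(void)
{
	struct fake_dst *d = dst_alloc_initial();

	fake_dst_hold(d);       /* a lookup path takes its own reference */
	fake_dst_release(d);    /* device teardown drops the VRF's reference */
	fake_dst_release(d);    /* the lookup path's put is the one that frees */
	return 0;
}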
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
 	INIT_WORK(&wl->firmware_load, b43_request_firmware);
 	schedule_work(&wl->firmware_load);

-bcma_out:
 	return err;

 bcma_err_wireless_exit:
 	ieee80211_free_hw(wl->hw);
+bcma_out:
+	kfree(dev);
 	return err;
 }

@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
 	b43_rng_exit(wl);

 	b43_leds_unregister(wl);
-
 	ieee80211_free_hw(wl->hw);
+	kfree(wldev->dev);
 }

 static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)

 	b43_leds_unregister(wl);
 	b43_wireless_exit(dev, wl);
+	kfree(dev);
 }

 static struct ssb_driver b43_ssb_driver = {

@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 	/* the fw is stopped, the aux sta is dead: clean up driver state */
 	iwl_mvm_del_aux_sta(mvm);

+	iwl_free_fw_paging(mvm);
+
 	/*
 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
 	 * won't be called in this case).
|
@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
|
||||||
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
|
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
|
||||||
kfree(mvm->nvm_sections[i].data);
|
kfree(mvm->nvm_sections[i].data);
|
||||||
|
|
||||||
iwl_free_fw_paging(mvm);
|
|
||||||
|
|
||||||
iwl_mvm_tof_clean(mvm);
|
iwl_mvm_tof_clean(mvm);
|
||||||
|
|
||||||
ieee80211_free_hw(mvm->hw);
|
ieee80211_free_hw(mvm->hw);
|
||||||
|
|
|
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
 	 */
 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
 	if (val & (BIT(1) | BIT(17))) {
-		IWL_INFO(trans,
-			 "can't access the RSA semaphore it is write protected\n");
+		IWL_DEBUG_INFO(trans,
+			       "can't access the RSA semaphore it is write protected\n");
 		return 0;
 	}


@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
 	for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
 		rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];

 	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 		 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
 		 rtldm->thermalvalue, thermal_value);
 	/*Record last Power Tracking Thermal Value*/
 	rtldm->thermalvalue = thermal_value;
 }


@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
 }
 EXPORT_SYMBOL(pci_write_vpd);

+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev:	pci device struct
+ * @len:	size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+	if (!dev->vpd || !dev->vpd->ops)
+		return -ENODEV;
+	return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

 /**
@@ -498,9 +511,23 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
 	return ret ? ret : count;
 }

+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+	struct pci_vpd *vpd = dev->vpd;
+
+	if (len == 0 || len > PCI_VPD_MAX_SIZE)
+		return -EIO;
+
+	vpd->valid = 1;
+	vpd->len = len;
+
+	return 0;
+}
+
 static const struct pci_vpd_ops pci_vpd_ops = {
 	.read = pci_vpd_read,
 	.write = pci_vpd_write,
+	.set_size = pci_vpd_set_size,
 };

 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
 	return ret;
 }

+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+	struct pci_dev *tdev = pci_get_slot(dev->bus,
+					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+	int ret;
+
+	if (!tdev)
+		return -ENODEV;
+
+	ret = pci_set_vpd_size(tdev, len);
+	pci_dev_put(tdev);
+	return ret;
+}
+
 static const struct pci_vpd_ops pci_vpd_f0_ops = {
 	.read = pci_vpd_f0_read,
 	.write = pci_vpd_f0_write,
+	.set_size = pci_vpd_f0_set_size,
 };

 int pci_vpd_init(struct pci_dev *dev)

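A sketch of the size-override rule the VPD hunks above introduce: a driver may shrink or fix up the advertised VPD length, but only to a non-zero value that still fits the 32K VPD address space; anything else is rejected. The constants loosely mirror the patch, but the function below is a userspace illustration, not the PCI core code.

/* Compile with: cc vpd.c -o vpd */
#include <errno.h>
#include <stdio.h>

#define PCI_VPD_ADDR_MASK 0x7fff
#define PCI_VPD_MAX_SIZE  (PCI_VPD_ADDR_MASK + 1)

static int set_vpd_size(size_t *current_len, size_t len)
{
	if (len == 0 || len > PCI_VPD_MAX_SIZE)
		return -EIO;          /* reject zero or out-of-range sizes */

	*current_len = len;           /* accept and record the override */
	return 0;
}

int main(void)
{
	size_t len = PCI_VPD_MAX_SIZE;

	printf("set to 1024: %d\n", set_vpd_size(&len, 1024));   /* ok */
	printf("set to 0:    %d\n", set_vpd_size(&len, 0));      /* -EIO, len untouched */
	printf("len is now %zu\n", len);
	return 0;
}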
@@ -32,7 +32,7 @@
 #define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)

 struct imx6_pcie {
-	struct gpio_desc	*reset_gpio;
+	int			reset_gpio;
 	struct clk		*pcie_bus;
 	struct clk		*pcie_phy;
 	struct clk		*pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
 	usleep_range(200, 500);

 	/* Some boards don't have PCIe reset GPIO. */
-	if (imx6_pcie->reset_gpio) {
-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
 		msleep(100);
-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
 	}
 	return 0;

@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 {
 	struct imx6_pcie *imx6_pcie;
 	struct pcie_port *pp;
+	struct device_node *np = pdev->dev.of_node;
 	struct resource *dbi_base;
 	struct device_node *node = pdev->dev.of_node;
 	int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 		return PTR_ERR(pp->dbi_base);

 	/* Fetch GPIOs */
-	imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-							GPIOD_OUT_LOW);
+	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+					    GPIOF_OUT_INIT_LOW, "PCIe reset");
+		if (ret) {
+			dev_err(&pdev->dev, "unable to get reset gpio\n");
+			return ret;
+		}
+	}

 	/* Fetch clocks */
 	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");